""" Evaluation script for RAG models."""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
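

# A quick doctest-style check (my own example, not from the original script):
#
#     >>> metric_max_over_ground_truths(lambda p, g: float(p == g), "paris", ["Paris", "paris"])
#     1.0
#
# Taking the max over all references rewards a prediction that matches any
# acceptable answer, the usual convention for EM/F1 over answer lists.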


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em:.2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, don't allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = get_args()
main(args)
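

# Example invocation (hypothetical paths, shown for illustration only):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/predictions.txt \
#       --eval_mode e2e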
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
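

# Index arithmetic for the implicit array heap: the parent of position 4 is
# (4 - 1) // 2 == 1, and position 1's children sit at 2 * 1 + 1 == 3 and
# 2 * 1 + 2 == 4, so the three helpers above are mutually consistent.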


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue, backed by an array heap plus a position map for O(log n) key updates."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement)
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement)
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions, keeping the position map in sync
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph, stored as an adjacency map of edge weights."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
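

# A minimal usage sketch (my own example, not part of the original module):
# build a small weighted graph and run the algorithm above.
if __name__ == "__main__":
    demo_graph: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    demo_graph.add_edge("a", "b", 3)
    demo_graph.add_edge("b", "c", 10)
    demo_graph.add_edge("c", "d", 5)
    demo_graph.add_edge("a", "c", 15)
    dist, parent = prims_algo(demo_graph)
    print(dist)    # per-node weights selected by the algorithm
    print(parent)  # each node's parent in the resulting spanning structure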
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository"""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},  # Enable reading proxy env variables
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
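

# A hedged usage sketch (illustrative only; assumes a DatasetInfo fetched via
# huggingface_hub, e.g. HfApi().dataset_info("squad")):
#
#     fs = HfFileSystem(repo_info=dataset_info)
#     fs.ls("")                      # top-level files and directories of the repo
#     with fs.open("README.md") as f:
#         header = f.read(100)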
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        # Update an element in log(N) time
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        # Get the fn-aggregate of the range [i, j] in log(N) time
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val

        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)
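
    # Worked example for the straddling case: after update(1, 5) the collection is
    # [2, 5, 5, 3, 4] and fn=operator.add; query_range(1, 3) reaches the root
    # (start=0, end=4, mid=2), and since i=1 <= mid < j=3 it combines both halves:
    # fn(_query_range(left, 1, 2), _query_range(right, 3, 3)) = (5 + 5) + 3 = 13.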

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
SCREAMING_SNAKE_CASE__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """A generator that yields the Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
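

# Sanity check: the generator yields 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ...
# and 144 is the first term with three digits, so solution(3) == 12 (F(12) = 144).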
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)

    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
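
    # Worked example of the estimate above: for a DataFrame of 1_000_000 rows whose
    # 100-row sample serializes to ~20_000 bytes, approx_bytes_per_row == 200 and
    # approx_total_size == 200 MB; with max_shard_size == 100 MB the DataFrame is
    # repartitioned into min(1_000_000, 2) == 2 partitions.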

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
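

# A hedged usage sketch (`Dataset.from_spark` is the public entry point that drives
# this builder; the DataFrame below is illustrative):
#
#     from pyspark.sql import SparkSession
#     from datasets import Dataset
#
#     spark = SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = Dataset.from_spark(df)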
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
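

# Example invocation (hypothetical dump path, for illustration only):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path /tmp/fast_tokenizers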
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
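

# Quick numeric check: for the uniform distribution p = [0.5, 0.5],
# entropy(p) = -(0.5 * ln 0.5 + 0.5 * ln 0.5) = ln 2 ~= 0.6931
# (torch.log is the natural logarithm).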


def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute:
    - head attention entropy
    - head importance scores according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask head (set some heads to zero), to test the effect on the network,
    based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
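

# Worked example of the masking schedule: GPT-2 small has 12 layers x 12 heads
# = 144 attention heads, so with the default masking_amount of 0.1 each
# iteration masks max(1, int(144 * 0.1)) == 14 additional heads until the score
# drops below masking_threshold * original_score.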


def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune head (remove heads weights) based on
    the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader,
        compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
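

# Example invocation (hypothetical paths, for illustration only):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir path/to/token_ids.txt \
#       --output_dir /tmp/pruned_gpt2 \
#       --try_masking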
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
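

# Sanity check: with sequence[0] = 0 and sequence[1] = 1, fibonacci(10) == 55, and
# fibonacci_digits_index(2) == 7 because F(7) = 13 is the first two-digit term.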
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging

if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()

logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure.
    """

    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCamelCase_ : Optional[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ):
__a : Optional[int] = hf_param.shape
__a : int = to_torch(params[gluon_param] )
__a : int = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__a : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__a : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__a : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__a : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__a : BertSelfAttention = layer.attention.self
__a : Optional[int] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
__a : str = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
__a : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
__a : str = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
__a : Dict = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
__a : str = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
__a : BertSelfOutput = layer.attention.output
__a : Tuple = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
__a : Dict = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
__a : BertIntermediate = layer.intermediate
__a : List[str] = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
__a : Optional[Any] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
__a : BertOutput = layer.output
__a : str = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
__a : List[Any] = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
__a : str = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
__a : List[str] = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__a : Union[str, Any] = RobertaTokenizer.from_pretrained('roberta-base' )
__a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ )['input_ids']
# Get gluon output
__a : Optional[int] = mx.nd.array([input_ids] )
__a : Tuple = original_bort(inputs=lowerCamelCase_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase_ )
__a : Optional[Any] = BertModel.from_pretrained(lowerCamelCase_ )
hf_bort_model.eval()
__a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ , return_tensors='pt' )
__a : int = hf_bort_model(**lowerCamelCase_ )[0]
__a : Dict = output_gluon[0].asnumpy()
__a : str = output_hf[0].detach().numpy()
__a : List[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__a : str = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if success:
print('✔️ Both model do output the same tensors' )
else:
print('❌ Both model do **NOT** output the same tensors' )
print('Absolute difference is:' , lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
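# Illustrative invocation (hedged: the flags match the parser above, but the
# script name and paths are placeholders, not part of the original file):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch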
| 47 | 0 |
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
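# Illustrative launch commands (hedged: `accelerate launch` is the standard
# entry point for such scripts, but the file name here is a placeholder):
#
#   python nlp_example.py                      # single CPU or single GPU
#   accelerate launch nlp_example.py           # distributed, after `accelerate config`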
| 107 |
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (the subkeys are applied in the reverse order: key2, then key1)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
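    # Hedged sanity check (an addition, not in the original): IP_inv is the
    # inverse permutation of IP, so applying both in sequence is the identity.
    assert apply_table(apply_table(message, IP), IP_inv) == message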
| 47 | 0 |
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
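# Minimal usage sketch (hedged: "google/ncsnpp-church-256" is a published
# score-SDE checkpoint on the Hub, but this snippet is illustrative and not
# part of the original module):
#
#   from diffusers import ScoreSdeVePipeline
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]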
| 372 |
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
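# Illustrative run command (hedged: assumes this file lives in the usual
# transformers test tree as tests/test_optimization_tf.py):
#
#   python -m pytest tests/test_optimization_tf.py -v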
| 47 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def _lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ):
for e in env_keys:
lowercase__ : List[str] = int(os.environ.get(lowerCamelCase_ , -1 ) )
if val >= 0:
return val
return default
def _lowerCamelCase ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str=False ):
lowercase__ : Dict = os.environ.get(lowerCamelCase_ , str(lowerCamelCase_ ) )
return strtobool(lowerCamelCase_ ) == 1 # As its name indicates `strtobool` actually returns an int...
def _lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Tuple="no" ):
lowercase__ : str = os.environ.get(lowerCamelCase_ , str(lowerCamelCase_ ) )
return value
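# Hedged usage sketch (an addition; "MY_FLAG" is an illustrative variable name):
if __name__ == "__main__":
    os.environ["MY_FLAG"] = "true"
    assert parse_flag_from_env("MY_FLAG") is True
    assert get_int_from_env(["UNSET_A", "UNSET_B"], default=4) == 4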
| 200 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
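# Hedged usage sketch (an addition, not part of the original module): the
# default configuration mirrors the `roberta-base` architecture.
if __name__ == "__main__":
    config = RobertaConfig()
    assert config.hidden_size == 768 and config.num_hidden_layers == 12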
| 47 | 0 |
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal timestep embeddings of shape [len(timesteps), embedding_dim]."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
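# Hedged shape check (an addition, not part of the original module): a batch of
# 4 timesteps embedded into 32 channels yields a (4, 32) array.
if __name__ == "__main__":
    emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
    assert emb.shape == (4, 32)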
| 250 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
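# Hedged usage sketch (an addition; "facebook/xglm-564M" is the checkpoint the
# maps above already reference):
#
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   tokenizer("Hello world")["input_ids"]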
| 47 | 0 |
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
a_ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
UpperCamelCase =None
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , ):
import pyspark
def generate_fn():
__lowercase : List[Any] = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
__lowercase : Optional[int] = df_with_partition_id.select('''*''' ).where(f"""part_id = {partition_id}""" ).drop('''part_id''' )
__lowercase : Optional[Any] = partition_df.collect()
__lowercase : Union[str, Any] = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class UpperCAmelCase_ ( _BaseExamplesIterable ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=None , ) -> Optional[Any]:
__lowercase : List[str] = df
__lowercase : Tuple = partition_order or range(self.df.rdd.getNumPartitions() )
__lowercase : List[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> Optional[int]:
yield from self.generate_examples_fn()
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple:
__lowercase : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
__lowercase : Union[str, Any] = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
@property
def _lowerCamelCase ( self ) -> int:
return len(self.partition_order )
class UpperCAmelCase_ ( datasets.DatasetBuilder ):
UpperCamelCase =SparkConfig
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> List[str]:
import pyspark
__lowercase : int = pyspark.sql.SparkSession.builder.getOrCreate()
__lowercase : Optional[int] = df
__lowercase : List[Any] = working_dir
super().__init__(
cache_dir=SCREAMING_SNAKE_CASE__ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE__ , )
def _lowerCamelCase ( self ) -> Optional[Any]:
def create_cache_and_write_probe(UpperCamelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
__lowercase : List[Any] = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(SCREAMING_SNAKE_CASE__ , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__lowercase : List[Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def _lowerCamelCase ( self ) -> Tuple:
return datasets.DatasetInfo(features=self.config.features )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[Any]:
import pyspark
def get_arrow_batch_size(UpperCamelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
__lowercase : List[str] = self.df.count()
__lowercase : Dict = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__lowercase : List[str] = (
self.df.limit(SCREAMING_SNAKE_CASE__ )
.repartition(1 )
.mapInArrow(SCREAMING_SNAKE_CASE__ , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__lowercase : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__lowercase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , int(approx_total_size / max_shard_size ) )
__lowercase : int = self.df.repartition(SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[str]:
import pyspark
__lowercase : Any = ParquetWriter if file_format == 'parquet' else ArrowWriter
__lowercase : Union[str, Any] = os.path.join(self._working_dir , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) if self._working_dir else fpath
__lowercase : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__lowercase : List[str] = self.config.features
__lowercase : int = self._writer_batch_size
__lowercase : Union[str, Any] = self._fs.storage_options
def write_arrow(UpperCamelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__lowercase : Any = pyspark.TaskContext().taskAttemptId()
__lowercase : str = next(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
__lowercase : Any = 0
__lowercase : List[str] = writer_class(
features=SCREAMING_SNAKE_CASE__ , path=working_fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , )
__lowercase : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(SCREAMING_SNAKE_CASE__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__lowercase : Optional[int] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
__lowercase : Optional[Any] = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , )
__lowercase : Union[str, Any] = pa.Table.from_batches([batch] )
writer.write_table(SCREAMING_SNAKE_CASE__ )
if writer._num_bytes > 0:
__lowercase : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(SCREAMING_SNAKE_CASE__ ) ):
__lowercase : Any = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , os.path.basename(SCREAMING_SNAKE_CASE__ ) )
shutil.move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowercase : Dict = (
self.df.mapInArrow(SCREAMING_SNAKE_CASE__ , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = "arrow" , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> Union[str, Any]:
self._validate_cache_dir()
__lowercase : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(SCREAMING_SNAKE_CASE__ )
__lowercase : Union[str, Any] = not is_remote_filesystem(self._fs )
__lowercase : Optional[Any] = os.path.join if is_local else posixpath.join
__lowercase : Any = '-TTTTT-SSSSS-of-NNNNN'
__lowercase : Union[str, Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
__lowercase : Any = path_join(self._output_dir , SCREAMING_SNAKE_CASE__ )
__lowercase : Any = 0
__lowercase : Dict = 0
__lowercase : int = 0
__lowercase : List[str] = []
__lowercase : Optional[int] = []
for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
(
__lowercase
) : Optional[int] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(SCREAMING_SNAKE_CASE__ )
__lowercase : List[str] = total_num_examples
__lowercase : Optional[int] = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
__lowercase : Any = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__lowercase : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
rename(
SCREAMING_SNAKE_CASE__ , fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , F"""{global_shard_id:05d}""" ).replace('''NNNNN''' , F"""{total_shards:05d}""" ) , )
__lowercase : Union[str, Any] = []
__lowercase : List[str] = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__lowercase : Union[str, Any] = task_id_and_num_shards[i]
for shard_id in range(SCREAMING_SNAKE_CASE__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ).map(lambda UpperCamelCase_ : _rename_shard(*SCREAMING_SNAKE_CASE__ ) ).collect()
else:
# don't use any pattern
__lowercase : List[Any] = 0
__lowercase : Any = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , fpath.replace(SCREAMING_SNAKE_CASE__ , '''''' ) , )
def _lowerCamelCase ( self , UpperCamelCase_ , ) -> List[str]:
return SparkExamplesIterable(self.df )
| 76 |
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
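# Illustrative invocation (hedged: positional arguments per the parser above,
# the paths are placeholders):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       ./vqa_fine_tuned.th ./visualbert-vqa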
| 47 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __lowerCamelCase ):
"""simple docstring"""
_lowerCamelCase = (DDPMParallelScheduler,)
def UpperCAmelCase__ ( self , **_lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.scheduler_classes[0]
snake_case_ : Dict = self.get_scheduler_config()
snake_case_ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = self.scheduler_classes[0]
snake_case_ : Dict = self.get_scheduler_config()
snake_case_ : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = self.dummy_model()
snake_case_ : Optional[int] = self.dummy_sample_deter
snake_case_ : Any = self.dummy_sample_deter + 0.1
snake_case_ : Any = self.dummy_sample_deter - 0.1
snake_case_ : Optional[int] = samplea.shape[0]
snake_case_ : int = torch.stack([samplea, samplea, samplea] , dim=0 )
snake_case_ : Dict = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
snake_case_ : Optional[int] = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
snake_case_ : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
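

# Usage sketch (not part of the original test file): the custom-timesteps API
# exercised above, shown on a standalone scheduler. Assumes a diffusers release
# that ships DDPMParallelScheduler.
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
print(scheduler.timesteps)  # -> tensor([100,  87,  50,   1,   0])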
| 58 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 47 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter so that requesting it raises a helpful error."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type name is a known alias, return its canonical name."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """
    Factory function to get a Formatter given its type name and keyword arguments.
    A formatter is an object that extracts and formats data from pyarrow tables.
    If the formatter for a given type name doesn't exist or is not available, an error is raised.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
| 436 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 47 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
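

# Usage sketch (not part of the original module): constructing the arguments
# dataclass and querying the resolved device. Requires torch to be installed;
# the field names passed here come from the BenchmarkArguments base class.
args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
print(args.device, args.n_gpu)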
| 210 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """Construct a BertGeneration tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
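

# Usage sketch (not part of the original module): loading the checkpoint named in
# PRETRAINED_VOCAB_FILES_MAP above and round-tripping a sentence. Downloads the
# sentencepiece model on first use.
tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
ids = tokenizer("Hello world").input_ids
print(tokenizer.decode(ids))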
| 47 | 0 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Count the ordered combinations from `array` that sum to `target`
    (plain top-down recursion, exponential time).
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Count the ordered combinations, memoizing intermediate results in `dp_array`.
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Count the ordered combinations iteratively in O(n * target) time.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Tuple = 3
lowerCamelCase : List[str] = 5
lowerCamelCase : Optional[Any] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
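
    # Quick check (not part of the original file): f(i) over parts {1, 2, 5}
    # follows f(i) = f(i-1) + f(i-2) + f(i-5), giving 1, 1, 2, 3, 5, 9, so all
    # three implementations should return 9 for target 5.
    assert combination_sum_iv(3, [1, 2, 5], 5) == 9
    assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
    assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9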
| 70 |
from ..utils import DummyObject, requires_backends


# NOTE: the original class names were lost in this dump; the names below follow
# diffusers' dummy "torch + transformers + onnx" objects module and are
# best-guess reconstructions.
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
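

# Behavior sketch (not part of the original file): touching a dummy object
# without torch/transformers/onnx installed raises an ImportError explaining
# which backends are required.
try:
    OnnxRuntimeModel()
except ImportError as err:
    print(err)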
| 47 | 0 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
__SCREAMING_SNAKE_CASE = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , 'sklearn' )
return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
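

# Usage sketch (not part of the original module): computing MRPC-style metrics
# on toy arrays. Needs scikit-learn installed.
import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics("mrpc", preds, labels))  # {'acc': 0.75, 'f1': ..., 'acc_and_f1': ...}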
| 553 |
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculation of the Easter date for a given year, following Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 47 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 414 |
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration class to store the configuration of an Informer model."""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
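

# Usage sketch (not part of the original module): instantiating a small config
# and inspecting derived attributes computed in __init__ above.
config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
print(config.d_model, config.feature_size, config.attention_type)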
| 47 | 0 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 107 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
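

# A minimal sketch of the denoising "full loop" the assertions above exercise.
# This only assumes diffusers' public DDIMScheduler API; the zero "residual"
# below is a stand-in for a real UNet's noise prediction.
def _ddim_full_loop_sketch():
    import torch
    from diffusers import DDIMScheduler

    scheduler = DDIMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # replace with model(sample, t) in practice
        sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample
    return sample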
| 47 | 0 |
"""A Fenwick tree (binary indexed tree) specialised for prefix *maximum* queries."""


class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Recompute this node's maximum over the range it covers.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
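
    # A small usage sketch of the MaxFenwickTree reconstructed above
    # (the sample values are illustrative, not from the original file):
    tree = MaxFenwickTree(5)
    tree.update(2, 7)
    tree.update(4, 3)
    assert tree.query(0, 5) == 7  # maximum over the half-open range [0, 5)
    assert tree.query(3, 5) == 3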
| 372 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (consistent system with x = y = 0)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-trivial solution (consistent system)
    return (x, y)
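

if __name__ == "__main__":
    # Quick illustrative checks (the systems below are my own examples):
    # x + 2y = 5 and 3x - y = 1 intersect at (1, 2).
    assert cramers_rule_2x2([1, 2, 5], [3, -1, 1]) == (1.0, 2.0)
    # 2x + 3y = 0 and 5x + y = 0 admit only the trivial solution.
    assert cramers_rule_2x2([2, 3, 0], [5, 1, 0]) == (0.0, 0.0)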
| 47 | 0 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished processes to finish queue

        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated processes
        # just for 1 cycle; unfinished processes will go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time-slice
            if cp.burst_time > time_slice:
                # use CPU for only the time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # put the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for the remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue uses the first-come-first-served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
__snake_case = Process('P1', 0, 53)
__snake_case = Process('P2', 0, 17)
__snake_case = Process('P3', 0, 68)
__snake_case = Process('P4', 0, 24)
__snake_case = 3
__snake_case = [17, 25]
__snake_case = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
__snake_case = Process('P1', 0, 53)
__snake_case = Process('P2', 0, 17)
__snake_case = Process('P3', 0, 68)
__snake_case = Process('P4', 0, 24)
__snake_case = 3
__snake_case = [17, 25]
__snake_case = deque([Pa, Pa, Pa, Pa])
__snake_case = MLFQ(number_of_queues, time_slices, queue, 0)
__snake_case = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
F"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| 200 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47 | 0 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
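

# Quick illustrative checks for `is_balanced` (the inputs are my own examples):
assert is_balanced("([]{})")
assert not is_balanced("([)]")
assert not is_balanced("((")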
| 250 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
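

# A simplified model of the `_LazyModule` indirection used above — enough to
# show the pattern (attribute access triggers the real submodule import).
# This is my own sketch, not transformers' actual implementation.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value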
| 47 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"
def __init__( self : Dict , __magic_name__ : Dict=32 , __magic_name__ : Tuple=768 , __magic_name__ : Tuple=12 , __magic_name__ : int=12 , __magic_name__ : int=3072 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : Any=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Dict=0.0 , __magic_name__ : List[Any]=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[Any]=1E-5 , __magic_name__ : Union[str, Any]="group" , __magic_name__ : Tuple="gelu" , __magic_name__ : List[str]=(512, 512, 512, 512, 512, 512, 512) , __magic_name__ : Any=(5, 2, 2, 2, 2, 2, 2) , __magic_name__ : List[Any]=(10, 3, 3, 3, 3, 2, 2) , __magic_name__ : List[str]=False , __magic_name__ : List[str]=128 , __magic_name__ : int=16 , __magic_name__ : Dict=320 , __magic_name__ : Optional[Any]=800 , __magic_name__ : str=False , __magic_name__ : Optional[int]=True , __magic_name__ : Optional[int]=0.05 , __magic_name__ : Tuple=10 , __magic_name__ : Any=2 , __magic_name__ : List[str]=0.0 , __magic_name__ : Dict=10 , __magic_name__ : Optional[Any]=320 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Dict=0.1 , __magic_name__ : Tuple=100 , __magic_name__ : int=256 , __magic_name__ : Union[str, Any]=256 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Optional[Any]="mean" , __magic_name__ : Dict=False , __magic_name__ : List[str]=False , __magic_name__ : List[str]=256 , __magic_name__ : Optional[Any]=(512, 512, 512, 512, 1500) , __magic_name__ : Dict=(5, 3, 3, 1, 1) , __magic_name__ : Optional[Any]=(1, 2, 3, 1, 1) , __magic_name__ : int=512 , __magic_name__ : Any=80 , __magic_name__ : List[str]=0 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=2 , __magic_name__ : int=False , __magic_name__ : List[str]=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[str]=3 , __magic_name__ : Optional[Any]=None , **__magic_name__ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = feat_extract_norm
lowerCAmelCase__ = feat_extract_activation
lowerCAmelCase__ = list(__magic_name__ )
lowerCAmelCase__ = list(__magic_name__ )
lowerCAmelCase__ = list(__magic_name__ )
lowerCAmelCase__ = conv_bias
lowerCAmelCase__ = num_buckets
lowerCAmelCase__ = max_bucket_distance
lowerCAmelCase__ = num_conv_pos_embeddings
lowerCAmelCase__ = num_conv_pos_embedding_groups
lowerCAmelCase__ = len(self.conv_dim )
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = feat_proj_dropout
lowerCAmelCase__ = final_dropout
lowerCAmelCase__ = layerdrop
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_ctc_classes
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = do_stable_layer_norm
lowerCAmelCase__ = use_weighted_layer_sum
lowerCAmelCase__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ = apply_spec_augment
lowerCAmelCase__ = mask_time_prob
lowerCAmelCase__ = mask_time_length
lowerCAmelCase__ = mask_time_min_masks
lowerCAmelCase__ = mask_feature_prob
lowerCAmelCase__ = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ = num_codevectors_per_group
lowerCAmelCase__ = num_codevector_groups
lowerCAmelCase__ = contrastive_logits_temperature
lowerCAmelCase__ = num_negatives
lowerCAmelCase__ = codevector_dim
lowerCAmelCase__ = proj_codevector_dim
lowerCAmelCase__ = diversity_loss_weight
# ctc loss
lowerCAmelCase__ = ctc_loss_reduction
lowerCAmelCase__ = ctc_zero_infinity
# adapter
lowerCAmelCase__ = add_adapter
lowerCAmelCase__ = adapter_kernel_size
lowerCAmelCase__ = adapter_stride
lowerCAmelCase__ = num_adapter_layers
lowerCAmelCase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase__ = list(__magic_name__ )
lowerCAmelCase__ = list(__magic_name__ )
lowerCAmelCase__ = list(__magic_name__ )
lowerCAmelCase__ = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
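

# A minimal usage sketch of the property above (uses the installed transformers
# package, where `inputs_to_logits_ratio` is the attribute's real name):
if __name__ == "__main__":
    from transformers import WavLMConfig as _WavLMConfig

    _config = _WavLMConfig()
    # product of conv_stride: 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320
    print(_config.inputs_to_logits_ratio)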
| 48 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
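

# A minimal usage sketch of the lazily exported model class (standard
# transformers API; the checkpoint names are illustrative):
if __name__ == "__main__":
    from transformers import EncoderDecoderModel

    model = EncoderDecoderModel.from_encoder_decoder_pretrained(
        "bert-base-uncased", "bert-base-uncased"
    )
    print(model.config.is_encoder_decoder)  # True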
| 48 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : Optional[Any] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = 13
lowerCAmelCase__ = 7
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = 99
lowerCAmelCase__ = 32
lowerCAmelCase__ = 2
lowerCAmelCase__ = 4
lowerCAmelCase__ = 37
lowerCAmelCase__ = "gelu"
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 512
lowerCAmelCase__ = 16
lowerCAmelCase__ = 2
lowerCAmelCase__ = 0.02
lowerCAmelCase__ = 3
lowerCAmelCase__ = 4
lowerCAmelCase__ = None
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = TFDistilBertModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = TFDistilBertForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFDistilBertForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFDistilBertForSequenceClassification(__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFDistilBertForMultipleChoice(__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFDistilBertForTokenClassification(__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :List[Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
snake_case__ :Dict = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :List[str] = False
snake_case__ :Tuple = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFDistilBertModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ , dim=37 )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__magic_name__ )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowerCAmelCase__ = TFDistilBertModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_tf
class A ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 48 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
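
# Example invocation (every path below is a placeholder, not a real checkpoint):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird/model.ckpt \
#       --big_bird_config_file ./bigbird/config.json \
#       --pytorch_dump_path ./bigbird-pytorch \
#       --is_trivia_qa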
| 48 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
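

# Migration sketch for the deprecation above (the checkpoint id is illustrative):
if __name__ == "__main__":
    image_processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")
    print(type(image_processor).__name__)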
| 48 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ :Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :str = False
snake_case__ :Any = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ :int = False
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 48 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :List[Any] = KandinskyInpaintPipeline
snake_case__ :str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
snake_case__ :List[str] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
snake_case__ :List[Any] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ :Tuple = False
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return 32
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return 100
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCAmelCase__ = MultilingualCLIP(__magic_name__ )
lowerCAmelCase__ = text_encoder.eval()
return text_encoder
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase__ = UNetaDConditionModel(**__magic_name__ )
return model
@property
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "cpu"
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**__magic_name__ )
lowerCAmelCase__ = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(__magic_name__ ) )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 48 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
def __init__( self : List[Any] , __magic_name__ : Tuple=250112 , __magic_name__ : str=512 , __magic_name__ : int=64 , __magic_name__ : str=1024 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=6 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=1E-6 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Dict="gated-gelu" , __magic_name__ : List[str]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[int]="T5Tokenizer" , __magic_name__ : str=True , __magic_name__ : int=0 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str=0 , **__magic_name__ : Any , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = d_kv
lowerCAmelCase__ = d_ff
lowerCAmelCase__ = num_layers
lowerCAmelCase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = relative_attention_num_buckets
lowerCAmelCase__ = relative_attention_max_distance
lowerCAmelCase__ = dropout_rate
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_factor
lowerCAmelCase__ = feed_forward_proj
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = self.feed_forward_proj.split("-" )
lowerCAmelCase__ = act_info[-1]
lowerCAmelCase__ = act_info[0] == "gated"
if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
lowerCAmelCase__ = "gelu_new"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.d_model
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.num_heads
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.num_layers
class A ( SCREAMING_SNAKE_CASE__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCAmelCase__ = "past_encoder_sequence + sequence"
lowerCAmelCase__ = {0: "batch"}
lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 13
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 5E-4
| 48 | 1 |
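The `feed_forward_proj` handling in the constructor above is self-contained enough to test in isolation. A standalone sketch of the same parsing; the function name and the shortened error message are illustrative:

def parse_feed_forward_proj(feed_forward_proj: str) -> tuple[str, bool]:
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
        raise ValueError(f"`{feed_forward_proj}` is not a valid activation function")
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # legacy alias kept for backward compatibility
    return dense_act_fn, is_gated_act


assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)
assert parse_feed_forward_proj("relu") == ("relu", False)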
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :List[str] = 'bloom'
snake_case__ :str = ['past_key_values']
snake_case__ :Optional[Any] = {
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self : Any , __magic_name__ : List[str]=250880 , __magic_name__ : Dict=64 , __magic_name__ : List[str]=2 , __magic_name__ : Optional[int]=8 , __magic_name__ : Any=1E-5 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : int=True , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]=2 , __magic_name__ : str=False , __magic_name__ : int=0.0 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : Optional[int]=1 , __magic_name__ : Dict=False , **__magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = vocab_size
# Backward compatibility with n_embed kwarg
lowerCAmelCase__ = kwargs.pop("n_embed" , __magic_name__ )
lowerCAmelCase__ = hidden_size if n_embed is None else n_embed
lowerCAmelCase__ = n_layer
lowerCAmelCase__ = n_head
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = pretraining_tp
lowerCAmelCase__ = apply_residual_connection_post_layernorm
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = slow_but_exact
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[Any] = version.parse('1.12' )
def __init__( self : Dict , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ )
if not getattr(self._config , "pad_token_id" , __magic_name__ ):
# TODO: how to do that better?
lowerCAmelCase__ = 0
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" , inverted_values_shape=__magic_name__ )
lowerCAmelCase__ = {0: "batch", 1: "past_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._config.n_layer
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._config.n_head
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return 1E-3
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : "PreTrainedTokenizer" , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional["TensorType"] = None , ):
"""simple docstring"""
lowerCAmelCase__ = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase__ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCAmelCase__ ,lowerCAmelCase__ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCAmelCase__ = seqlen + 2
lowerCAmelCase__ = self._config.hidden_size // self.num_attention_heads
lowerCAmelCase__ = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
lowerCAmelCase__ = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
lowerCAmelCase__ = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCAmelCase__ = common_inputs["attention_mask"]
if self.use_past:
lowerCAmelCase__ = ordered_inputs["attention_mask"].dtype
lowerCAmelCase__ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return 13
| 48 |
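The ONNX config above builds dummy `past_key_values` in BLOOM's layout, where keys and values use different axis orders. A small sketch of just those shapes; the dimension sizes are illustrative:

import torch

batch, num_heads, head_dim, past_len, num_layers = 2, 8, 64, 5, 4
# Keys are (batch * heads, head_dim, past_len); values are (batch * heads, past_len, head_dim).
key = torch.zeros(batch * num_heads, head_dim, past_len)
value = torch.zeros(batch * num_heads, past_len, head_dim)
past_key_values = [(key.clone(), value.clone()) for _ in range(num_layers)]
print(past_key_values[0][0].shape, past_key_values[0][1].shape)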
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Graph that stores transition probabilities between nodes so a Markov chain
    random walk can be simulated over it.
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    """
    Run the Markov chain for `steps` transitions starting from `start` and
    count how many times each node was visited.
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 48 | 1 |
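A quick usage sketch for the random walk above; the fair two-state chain is illustrative, and the per-node counts are stochastic while the total is deterministic:

transitions = [
    ("a", "a", 0.5),
    ("a", "b", 0.5),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
visited = get_transitions("a", transitions, 1000)
assert sum(visited.values()) == 1000 + 2  # Counter seeds each node with one visit
print(visited.most_common())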
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'dpt'
def __init__( self : int , __magic_name__ : int=768 , __magic_name__ : int=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : Union[str, Any]=3072 , __magic_name__ : str="gelu" , __magic_name__ : Optional[int]=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : str=0.02 , __magic_name__ : List[str]=1E-12 , __magic_name__ : Union[str, Any]=384 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Any=3 , __magic_name__ : List[str]=False , __magic_name__ : int=True , __magic_name__ : List[Any]=[2, 5, 8, 11] , __magic_name__ : List[Any]="project" , __magic_name__ : Tuple=[4, 2, 1, 0.5] , __magic_name__ : List[str]=[96, 192, 384, 768] , __magic_name__ : List[str]=256 , __magic_name__ : Any=-1 , __magic_name__ : Dict=False , __magic_name__ : Any=True , __magic_name__ : int=0.4 , __magic_name__ : Tuple=255 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Tuple=[1, 1024, 24, 24] , __magic_name__ : List[Any]=[0, 1] , __magic_name__ : str=None , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone." )
lowerCAmelCase__ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
lowerCAmelCase__ = BitConfig(**__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
logger.info("Initializing the config with a `BiT` backbone." )
lowerCAmelCase__ = BitConfig(**__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ = backbone_config
else:
raise ValueError(
f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
lowerCAmelCase__ = backbone_featmap_shape
lowerCAmelCase__ = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = []
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = qkv_bias
lowerCAmelCase__ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
lowerCAmelCase__ = readout_type
lowerCAmelCase__ = reassemble_factors
lowerCAmelCase__ = neck_hidden_sizes
lowerCAmelCase__ = fusion_hidden_size
lowerCAmelCase__ = head_in_index
lowerCAmelCase__ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase__ = use_auxiliary_head
lowerCAmelCase__ = auxiliary_loss_weight
lowerCAmelCase__ = semantic_loss_ignore_index
lowerCAmelCase__ = semantic_classifier_dropout
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase__ = self.backbone_config.to_dict()
lowerCAmelCase__ = self.__class__.model_type
return output
| 48 |
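The DPT config's `to_dict` above flattens the nested backbone config so the whole object serializes as one dictionary. A minimal sketch of that pattern, detached from `PretrainedConfig`; the class names are illustrative:

import copy


class ChildConfig:
    def __init__(self, depths=(3, 4, 9)):
        self.depths = list(depths)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class ParentConfig:
    model_type = "parent"

    def __init__(self, backbone_config=None):
        self.backbone_config = backbone_config

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


print(ParentConfig(ChildConfig()).to_dict())
# {'backbone_config': {'depths': [3, 4, 9]}, 'model_type': 'parent'}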
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
# mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
# mock load_from_checkpoint which is supposed to download a comet model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
| 48 | 1 |
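The `register_intensive_calls_patcher` mechanism above boils down to storing context managers in a dict keyed by metric name, then entering the right one before running doctests. A self-contained sketch of the same pattern; the patched target and names are illustrative:

import os
from contextlib import contextmanager
from unittest.mock import patch

INTENSIVE_CALLS_PATCHER = {}


def register_intensive_calls_patcher(metric_name):
    def wrapper(patcher):
        INTENSIVE_CALLS_PATCHER[metric_name] = contextmanager(patcher)
        return patcher
    return wrapper


@register_intensive_calls_patcher("demo")
def patch_demo():
    with patch("os.getcwd", return_value="/mocked"):
        yield


with INTENSIVE_CALLS_PATCHER["demo"]():
    assert os.getcwd() == "/mocked"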
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase__ : str = get_tests_dir("fixtures")
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = mock.Mock()
lowerCAmelCase__ = 500
lowerCAmelCase__ = {}
lowerCAmelCase__ = HTTPError
lowerCAmelCase__ = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__magic_name__ ) as mock_head:
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class A ( unittest.TestCase ):
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any ):
"""simple docstring"""
lowerCAmelCase__ = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(__magic_name__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__magic_name__ , repo_id="test-feature-extractor" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(__magic_name__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__magic_name__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
lowerCAmelCase__ = CustomFeatureExtractor.from_pretrained(__magic_name__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained(
f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 48 |
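The cached-file test above works by forcing every HTTP request to fail with a 500 response. A standalone sketch of that mock; the example URL is illustrative and running it needs `requests` installed:

from unittest import mock

import requests
from requests.exceptions import HTTPError

response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}

with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
    out = requests.Session().request("HEAD", "https://example.com")
    assert out.status_code == 500
mock_head.assert_called()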
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 48 | 1 |
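A quick sanity check of the language table and the lookup that `encode` performs before tokenization; the helper name is illustrative:

def to_codes(src_lang: str, tgt_lang: str) -> tuple[str, str]:
    for lang in (src_lang, tgt_lang):
        if lang not in LANGUAGE_CODES:
            raise ValueError(f"{lang} is not a supported language.")
    return LANGUAGE_CODES[src_lang], LANGUAGE_CODES[tgt_lang]


assert to_codes("English", "Romanian") == ("eng_Latn", "ron_Latn")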
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :List[Any] = VOCAB_FILES_NAMES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Tuple = ['input_ids', 'attention_mask']
def __init__( self : int , __magic_name__ : str , __magic_name__ : Optional[int]="<s>" , __magic_name__ : int="</s>" , __magic_name__ : Optional[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : List[str]="<unk>" , __magic_name__ : Optional[int]="<pad>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Optional[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCAmelCase__ = 7
lowerCAmelCase__ = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCAmelCase__ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
if token_ids_a is None:
return [1] + ([0] * len(__magic_name__ ))
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ ))
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
lowerCAmelCase__ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Dict ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Tuple ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = "".join(__magic_name__ ).replace(__magic_name__ , " " ).strip()
return out_string
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
| 48 |
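The fairseq/SentencePiece id alignment above can be shown without a real SentencePiece model. A toy sketch of the offset logic; the stand-in vocabulary is illustrative:

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1


def spm_piece_to_id(piece: str) -> int:
    # Stand-in for sp_model.PieceToId; SentencePiece returns 0 for unknown pieces.
    toy_vocab = {",": 3, ".": 4, "▁": 5}
    return toy_vocab.get(piece, 0)


def convert_token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id(token)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


assert convert_token_to_id("<pad>") == 1
assert convert_token_to_id(",") == 4  # 3 + offset
assert convert_token_to_id("oov-token") == 3  # falls back to <unk>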
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 48 | 1 |
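Illustrative construction of the config above; running it needs `transformers` installed for the `PretrainedConfig` base class:

config = TimmBackboneConfig(backbone="resnet50")
assert config.out_indices == (-1,)
assert config.features_only and config.use_timm_backbone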
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :List[str] = 'mask2former'
snake_case__ :Dict = ['swin']
snake_case__ :List[str] = {'hidden_size': 'hidden_dim'}
def __init__( self : Tuple , __magic_name__ : Optional[Dict] = None , __magic_name__ : int = 256 , __magic_name__ : int = 256 , __magic_name__ : int = 256 , __magic_name__ : int = 1024 , __magic_name__ : str = "relu" , __magic_name__ : int = 6 , __magic_name__ : int = 10 , __magic_name__ : int = 8 , __magic_name__ : float = 0.0 , __magic_name__ : int = 2048 , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : int = 4 , __magic_name__ : int = 255 , __magic_name__ : int = 100 , __magic_name__ : float = 0.1 , __magic_name__ : float = 2.0 , __magic_name__ : float = 5.0 , __magic_name__ : float = 5.0 , __magic_name__ : int = 12544 , __magic_name__ : float = 3.0 , __magic_name__ : float = 0.75 , __magic_name__ : float = 0.02 , __magic_name__ : float = 1.0 , __magic_name__ : bool = True , __magic_name__ : List[int] = [4, 8, 16, 32] , __magic_name__ : bool = None , **__magic_name__ : str , ):
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
lowerCAmelCase__ = CONFIG_MAPPING["swin"](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__magic_name__ , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ = backbone_config.pop("model_type" )
lowerCAmelCase__ = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase__ = config_class.from_dict(__magic_name__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
lowerCAmelCase__ = backbone_config
lowerCAmelCase__ = feature_size
lowerCAmelCase__ = mask_feature_size
lowerCAmelCase__ = hidden_dim
lowerCAmelCase__ = encoder_feedforward_dim
lowerCAmelCase__ = activation_function
lowerCAmelCase__ = encoder_layers
lowerCAmelCase__ = decoder_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = dropout
lowerCAmelCase__ = dim_feedforward
lowerCAmelCase__ = pre_norm
lowerCAmelCase__ = enforce_input_projection
lowerCAmelCase__ = common_stride
lowerCAmelCase__ = ignore_value
lowerCAmelCase__ = num_queries
lowerCAmelCase__ = no_object_weight
lowerCAmelCase__ = class_weight
lowerCAmelCase__ = mask_weight
lowerCAmelCase__ = dice_weight
lowerCAmelCase__ = train_num_points
lowerCAmelCase__ = oversample_ratio
lowerCAmelCase__ = importance_sample_ratio
lowerCAmelCase__ = init_std
lowerCAmelCase__ = init_xavier_std
lowerCAmelCase__ = use_auxiliary_loss
lowerCAmelCase__ = feature_strides
lowerCAmelCase__ = output_auxiliary_logits
lowerCAmelCase__ = decoder_layers
super().__init__(**__magic_name__ )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : PretrainedConfig , **__magic_name__ : str ):
"""simple docstring"""
return cls(
backbone_config=__magic_name__ , **__magic_name__ , )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ = self.backbone_config.to_dict()
lowerCAmelCase__ = self.__class__.model_type
return output
| 48 |
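The hidden_size-to-hidden_dim alias table near the top of the Mask2Former config (called `attribute_map` in the transformers source) lets callers read `hidden_size` while the config stores `hidden_dim`. A minimal sketch of such aliasing with plain `__getattr__`; the real `PretrainedConfig` machinery is more involved, and the class name is illustrative:

class AliasedConfig:
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(self, hidden_dim=256):
        self.hidden_dim = hidden_dim

    def __getattr__(self, name):
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


cfg = AliasedConfig()
assert cfg.hidden_size == cfg.hidden_dim == 256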
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 48 | 1 |
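`PipelineTool.__call__` drives the tool above through encode, forward, and decode. A toy stand-in showing just that call sequence, with no model download; everything here is illustrative:

class ToyCaptioner:
    def encode(self, image):
        return {"pixel_values": image}

    def forward(self, inputs):
        return ["a", "toy", "caption"]

    def decode(self, outputs):
        return " ".join(outputs).strip()

    def __call__(self, image):
        return self.decode(self.forward(self.encode(image)))


assert ToyCaptioner()("fake-image") == "a toy caption"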
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly, returning static ints where known."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # Adding a tiny epsilon works around an XLA compilation bug without changing the result.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # A simplified functional layernorm, duplicating torch.nn.functional.layer_norm.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF.
    # If end_dim or start_dim is negative, count them from the end.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Invert an attention mask (e.g., switches 0. and 1.)."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def A ( UpperCamelCase_ : tf.Tensor , UpperCamelCase_ : int , UpperCamelCase_ : str = "input_ids" ) -> None:
'''simple docstring'''
tf.debugging.assert_less(
UpperCamelCase_ , tf.cast(UpperCamelCase_ , dtype=tensor.dtype ) , message=(
F"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCamelCase_ )}) must be smaller than the embedding """
F"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def save_attributes_to_hdf5_group(group, name, data):
    """Saves attributes (data) of the specified name into the HDF5 group."""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 48 |
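A short demonstration of `shape_list` semantics: static dimensions come back as Python ints, unknown ones as scalar tensors, which keeps downstream reshapes graph-safe. The traced function below is illustrative:

import numpy as np
import tensorflow as tf

print(shape_list(np.zeros((2, 3))))  # [2, 3]
print(shape_list(tf.zeros((2, 3))))  # [2, 3]


@tf.function(input_signature=[tf.TensorSpec([None, 3], tf.float32)])
def ones_like_batch(t):
    batch, width = shape_list(t)  # batch is a scalar tensor here, width the Python int 3
    return tf.fill(tf.stack([batch, width]), 1.0)


print(ones_like_batch(tf.zeros((4, 3))).shape)  # (4, 3)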
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = VOCAB_FILES_NAMES
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Tuple = ['input_ids', 'attention_mask']
snake_case__ :List[int] = []
snake_case__ :List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: Optional[str], src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: [src_lang_code] ... <eos>."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: [tgt_lang_code] ... <eos>."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
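# A minimal usage sketch (assumes Hub access; the checkpoint name comes from
# PRETRAINED_VOCAB_FILES_MAP above):
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")
#   # input_ids start with the en_XX language code and end with </s>,
#   # mirroring set_src_lang_special_tokens above.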
| 48 | 1 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1E-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1E-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1E-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1E-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2E-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2E-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9E-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2E-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2E-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2E-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2E-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1E-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2E-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2E-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2E-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2E-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2E-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2E-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
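# A quick interactive sketch of the pipeline under test (hypothetical session;
# the tiny checkpoint is chosen only because it downloads fast):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#   unmasker("Paris is the <mask> of France.")
#   # -> a list of dicts with "sequence", "score", "token" and "token_str" keys,
#   #    exactly the shape the assertions above check with ANY(...).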
| 48 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 48 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single PIL image from a random numpy array."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        pred_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(pred_ids)
        decoded_tok = tokenizer.batch_decode(pred_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
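# A minimal sketch of the processor the tests above exercise (hypothetical model
# variable; MgpstrProcessor wraps a ViT image processor plus the character tokenizer):
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   decoded = processor.batch_decode(model(pixel_values).logits)
#   decoded["generated_text"]  # recognized scene-text strings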
| 48 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the original GroupViT checkpoint weights into the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3_523, 6.3_629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1_873, 8.6_230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1E-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
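    # Example invocation (hypothetical local paths; the .pth checkpoint must be
    # downloaded from the NVlabs GroupViT release first):
    #
    #   python convert_groupvit_checkpoint.py \
    #       --checkpoint_path group_vit_gcc_yfcc.pth \
    #       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
    #       --model_name groupvit-gcc-yfcc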
| 48 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
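# With the lazy structure above, a plain import stays cheap until an attribute is
# touched (a sketch; no heavy framework code is executed at import time):
#
#   from transformers.models import blenderbot
#   config_cls = blenderbot.BlenderbotConfig  # first access triggers the real submodule import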
| 48 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of prime products encoding each way to write the number as a sum of primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first integer with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
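# Sanity check against the Project Euler 77 statement, which says 10 can be
# written as a sum of primes in exactly five different ways:
#
#   assert len(partition(10)) == 5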
| 48 | 1 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 48 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string character by character."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
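# A minimal usage sketch (assumes Hub access; MGP-STR tokenizes scene text
# character by character, so each id maps to a single character):
#
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tokenizer("hello")["input_ids"]
#   tokenizer.convert_ids_to_tokens(ids)  # -> ["h", "e", "l", "l", "o"]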
| 48 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below the limit."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
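# Classic check: 220 and 284 form the smallest amicable pair, so
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220:
#
#   assert sum_of_divisors(220) == 284
#   assert sum_of_divisors(284) == 220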
| 48 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so its shortest edge matches `size["shortest_edge"]`, keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by the given factor (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : List[Tuple] = None ):
"""simple docstring"""
lowerCAmelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(__magic_name__ ):
lowerCAmelCase__ = target_sizes.numpy()
lowerCAmelCase__ = []
for idx in range(len(__magic_name__ ) ):
lowerCAmelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__magic_name__ )
lowerCAmelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__magic_name__ )
else:
lowerCAmelCase__ = logits.argmax(dim=1 )
lowerCAmelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 48 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.mean(1 )
# Centralize the data of class i
lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ )
if i > 0:
            # covariance_sum has already been initialized (every class after the first)
covariance_sum += np.dot(UpperCamelCase_ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T )
return covariance_sum / features.shape[1]
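# Note: the function above builds the within-class scatter matrix of classical
# LDA, S_W = (1/N) * sum_i sum_{x in class i} (x - mu_i)(x - mu_i)^T, here
# normalized by the total sample count N = features.shape[1]; the function
# below builds the corresponding between-class scatter matrix.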
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = features.mean(1 )
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.shape[1]
lowerCAmelCase__ = data.mean(1 )
if i > 0:
            # covariance_sum has already been initialized (every class after the first)
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
lowerCAmelCase__ = features.mean(1 )
# Center the dataset
lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) )
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1]
lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ )
# Take all the columns in the reverse order (-1), and then takes only the first
lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
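# Minimal usage sketch for the PCA routine above (the tests below call it by
# its original name, principal_component_analysis; features are laid out one
# sample per column, i.e. shape (n_features, n_samples)):
#
#   features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
#   projected = principal_component_analysis(features, dimensions=1)
#   # projected has shape (1, 3): one principal-component score per sample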
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
lowerCAmelCase__ ,lowerCAmelCase__ = eigh(
covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , )
lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions]
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ )
lowerCAmelCase__ = svd_matrix[:, 0:dimensions]
lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
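# In the LDA routine above, scipy.linalg.eigh(a, b) solves the generalized
# symmetric eigenproblem a @ v = w * b @ v, so the leading eigenvectors
# maximize between-class scatter relative to within-class scatter.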
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = linear_discriminant_analysis(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowerCAmelCase__ = 2
    lowerCAmelCase__ = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ )
if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 1 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
UpperCAmelCase__ : Tuple = datasets.load_iris()
UpperCAmelCase__ : Any = np.array(data["data"])
UpperCAmelCase__ : Union[str, Any] = np.array(data["target"])
UpperCAmelCase__ : List[str] = data["target_names"]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = train_test_split(X, y)
def A ( UpperCamelCase_ : str , UpperCamelCase_ : Tuple ) -> str:
'''simple docstring'''
return np.linalg.norm(np.array(UpperCamelCase_ ) - np.array(UpperCamelCase_ ) )
def A ( UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str=5 ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = zip(UpperCamelCase_ , UpperCamelCase_ )
# List of distances of all points from the point to be classified
lowerCAmelCase__ = []
for data_point in data:
lowerCAmelCase__ = euclidean_distance(data_point[0] , UpperCamelCase_ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
lowerCAmelCase__ = [i[1] for i in sorted(UpperCamelCase_ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
lowerCAmelCase__ = Counter(UpperCamelCase_ ).most_common(1 )[0][0]
return classes[result]
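# Note: this is brute-force k-NN; each query scans the whole training set, so
# a single classification costs O(n_train) distance computations plus an
# O(n_train log n_train) sort.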
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 48 |
'''simple docstring'''
def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> list:
'''simple docstring'''
lowerCAmelCase__ = word.split()
def justify(UpperCamelCase_ : list , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> str:
lowerCAmelCase__ = max_width - width
lowerCAmelCase__ = len(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ = []
for i in range(UpperCamelCase_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
for word in words:
if width + len(UpperCamelCase_ ) + len(UpperCamelCase_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase_ )
width += len(UpperCamelCase_ )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
# reset new line and new width
lowerCAmelCase__ ,lowerCAmelCase__ = [word], len(UpperCamelCase_ )
lowerCAmelCase__ = max_width - width - len(UpperCamelCase_ )
answer.append(" ".join(UpperCamelCase_ ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = 'falcon'
snake_case__ :Optional[Any] = ['past_key_values']
def __init__( self : int , __magic_name__ : Dict=65024 , __magic_name__ : Tuple=4544 , __magic_name__ : Union[str, Any]=32 , __magic_name__ : List[Any]=71 , __magic_name__ : Tuple=1E-5 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[str]=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Any=None , __magic_name__ : Optional[int]=False , __magic_name__ : List[Any]=False , __magic_name__ : Optional[Any]=True , __magic_name__ : Tuple=True , __magic_name__ : int=False , __magic_name__ : Optional[Any]=11 , __magic_name__ : Dict=11 , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = vocab_size
# Backward compatibility with n_embed kwarg
lowerCAmelCase__ = kwargs.pop("n_embed" , __magic_name__ )
lowerCAmelCase__ = hidden_size if n_embed is None else n_embed
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = num_attention_heads if num_kv_heads is None else num_kv_heads
lowerCAmelCase__ = alibi
lowerCAmelCase__ = new_decoder_architecture
lowerCAmelCase__ = multi_query # Ignored when new_decoder_architecture is True
lowerCAmelCase__ = parallel_attn
lowerCAmelCase__ = bias
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return not self.alibi
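# Minimal usage sketch (hedged: upstream this class is FalconConfig, the base
# class is PretrainedConfig, and the two properties above are named head_dim
# and rotary):
#
#   config = FalconConfig()               # defaults: hidden_size=4544, 71 heads
#   assert config.head_dim == 4544 // 71  # == 64
#   assert config.rotary                  # rotary embeddings whenever alibi=False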
| 48 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ : str = sys.version_info >= (3, 10)
def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
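# default_factory is used here (rather than a plain default) because mutable
# defaults such as lists are rejected by dataclasses.field.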
@dataclass
class A :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'titi'
snake_case__ :Optional[int] = 'toto'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'titi'
snake_case__ :str = 'toto'
snake_case__ :int = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class A :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
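        # The "True"/"False" string forms above parse because HfArgumentParser
        # routes bool fields through string_to_bool (imported at the top) as
        # the argparse type.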
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
lowerCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__magic_name__ , __magic_name__ )
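            # YAML 1.2 is a superset of JSON, so parse_yaml_file below can
            # read the JSON file just written.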
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 48 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A :
def __init__( self : Dict , __magic_name__ : List[str] , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : int="resnet50" , __magic_name__ : List[Any]=3 , __magic_name__ : Dict=32 , __magic_name__ : int=3 , __magic_name__ : List[str]=True , __magic_name__ : Any=True , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = out_indices if out_indices is not None else [4]
lowerCAmelCase__ = stage_names
lowerCAmelCase__ = out_features
lowerCAmelCase__ = backbone
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = use_pretrained_backbone
lowerCAmelCase__ = is_training
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = TimmBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ ,lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (TimmBackbone,) if is_torch_available() else ()
snake_case__ :Optional[int] = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case__ :Optional[Any] = False
snake_case__ :Union[str, Any] = False
snake_case__ :Tuple = False
snake_case__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = TimmBackboneModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "resnet18"
lowerCAmelCase__ = "microsoft/resnet-18"
lowerCAmelCase__ = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ )
lowerCAmelCase__ = AutoBackbone.from_pretrained(__magic_name__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase__ = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ , out_indices=[1, 2, 3] )
lowerCAmelCase__ = AutoBackbone.from_pretrained(__magic_name__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(__magic_name__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
lowerCAmelCase__ = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase__ = self.all_model_classes[0]
lowerCAmelCase__ = model_class(__magic_name__ )
model.to(__magic_name__ )
lowerCAmelCase__ = self._prepare_for_class(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = model(**__magic_name__ )
lowerCAmelCase__ = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase__ = outputs.hidden_states[0]
hidden_states.retain_grad()
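        # retain_grad() keeps gradients on these non-leaf tensors so the
        # assertions below can inspect .grad after backward().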
if self.has_attentions:
lowerCAmelCase__ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__magic_name__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCAmelCase__ = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase__ = copy.deepcopy(__magic_name__ )
lowerCAmelCase__ = None
lowerCAmelCase__ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCAmelCase__ = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase__ = copy.deepcopy(__magic_name__ )
lowerCAmelCase__ = False
lowerCAmelCase__ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCAmelCase__ = model(**__magic_name__ )
| 48 |
'''simple docstring'''
import sys
from collections import defaultdict
class A :
def __init__( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = []
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[Any] ):
"""simple docstring"""
return self.node_position[vertex]
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = pos
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowerCAmelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowerCAmelCase__ = 2 * start + 1
else:
lowerCAmelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowerCAmelCase__ ,lowerCAmelCase__ = heap[smallest_child], positions[smallest_child]
lowerCAmelCase__ ,lowerCAmelCase__ = (
heap[start],
positions[start],
)
lowerCAmelCase__ ,lowerCAmelCase__ = temp, tempa
lowerCAmelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __magic_name__ )
self.top_to_bottom(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = position[index]
while index != 0:
lowerCAmelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowerCAmelCase__ = heap[parent]
lowerCAmelCase__ = position[parent]
self.set_position(position[parent] , __magic_name__ )
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , __magic_name__ )
break
lowerCAmelCase__ = parent
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , 0 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = len(__magic_name__ ) // 2 - 1
for i in range(__magic_name__ , -1 , -1 ):
self.top_to_bottom(__magic_name__ , __magic_name__ , len(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = positions[0]
lowerCAmelCase__ = sys.maxsize
self.top_to_bottom(__magic_name__ , 0 , len(__magic_name__ ) , __magic_name__ )
return temp
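# The Heap class above is a hand-rolled binary min-heap that also records each
# vertex's index in node_position, so decrease-key (bottom_to_top) and
# extract-min (delete_minimum) both run in O(log V).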
def A ( UpperCamelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = Heap()
lowerCAmelCase__ = [0] * len(UpperCamelCase_ )
lowerCAmelCase__ = [-1] * len(UpperCamelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowerCAmelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
lowerCAmelCase__ = []
for vertex in range(len(UpperCamelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase_ )
heap.node_position.append(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = 1
lowerCAmelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowerCAmelCase__ = 0
lowerCAmelCase__ = distance
heap.heapify(UpperCamelCase_ , UpperCamelCase_ )
for _ in range(1 , len(UpperCamelCase_ ) ):
lowerCAmelCase__ = heap.delete_minimum(UpperCamelCase_ , UpperCamelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowerCAmelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_ )]
):
lowerCAmelCase__ = distance
heap.bottom_to_top(
UpperCamelCase_ , heap.get_position(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = vertex
return tree_edges
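# With the binary heap, this Prim's implementation runs in O((V + E) log V):
# each vertex is extracted once and each edge triggers at most one decrease-key.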
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ : Optional[int] = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ : str = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ : int = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 48 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
UpperCAmelCase__ : Tuple = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
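# Weights whose mapped name appears in TOP_LEVEL_KEYS live at the top level of
# the HF model; all other mapped names are prefixed with "unispeech." below.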
def A ( UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict ) -> Dict:
'''simple docstring'''
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCAmelCase__ = "lm_head"
lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
lowerCAmelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == "group" , )
lowerCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(UpperCamelCase_ )[0].split("." )[-2]
lowerCAmelCase__ = mapped_key.replace("*" , UpperCamelCase_ )
if "weight_g" in name:
lowerCAmelCase__ = "weight_g"
elif "weight_v" in name:
lowerCAmelCase__ = "weight_v"
elif "bias" in name:
lowerCAmelCase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ = "weight"
else:
lowerCAmelCase__ = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = full_name.split("conv_layers." )[-1]
lowerCAmelCase__ = name.split("." )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase_ )
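# In the fairseq checkpoint, conv_layers entries with type_id 0 hold the
# convolution weights/biases and type_id 2 entries hold the layer norms, which
# is what the branches above distinguish.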
@torch.no_grad()
def A ( UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : int=True ) -> Tuple:
'''simple docstring'''
if config_path is not None:
lowerCAmelCase__ = UniSpeechConfig.from_pretrained(UpperCamelCase_ )
else:
lowerCAmelCase__ = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCAmelCase__ = Dictionary.load_from_json(UpperCamelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase__ = target_dict.pad_index
lowerCAmelCase__ = target_dict.bos_index
lowerCAmelCase__ = target_dict.eos_index
lowerCAmelCase__ = len(target_dict.symbols )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "vocab.json" )
if not os.path.isdir(UpperCamelCase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCamelCase_ ) )
return
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
lowerCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase__ = 42
lowerCAmelCase__ = 43
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = WavaVecaPhonemeCTCTokenizer(
UpperCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCamelCase_ , )
lowerCAmelCase__ = True if config.feat_extract_norm == "layer" else False
lowerCAmelCase__ = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
lowerCAmelCase__ = WavaVecaProcessor(feature_extractor=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
processor.save_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = UniSpeechForCTC(UpperCamelCase_ )
else:
lowerCAmelCase__ = UniSpeechForPreTraining(UpperCamelCase_ )
if is_finetuned:
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCAmelCase__ = model[0].eval()
recursively_load_weights(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
hf_unispeech.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 48 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "<pad>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
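    # Pieces missing from the fixture vocab ("9", "é") map to the unk id on
    # encode, which is why they decode back as "<unk>" in the assertion above.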
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium'
snake_case__ :Tuple = 'C\'est trop cool'
snake_case__ :List[str] = 'Esto es genial'
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
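# --- Illustrative sketch (added for clarity, not part of the original test file) ---
# The assertions above rely on the Speech2Text multilingual convention: exactly one
# target-language token is prepended to every encoded sequence and an EOS token is
# appended. The toy class below is a hypothetical stand-in (names and ids invented)
# that mimics only that behavior.
class _ToyMultilingualTokenizer:
    def __init__(self):
        self.lang_code_to_id = {"pt": 4, "ru": 6, "it": 9, "de": 11}
        self.eos_token_id = 2
        self.tgt_lang = "de"

    @property
    def prefix_tokens(self):
        # One language-code token, mirroring the `prefix_tokens` checked above.
        return [self.lang_code_to_id[self.tgt_lang]]

    def encode(self, token_ids):
        return self.prefix_tokens + list(token_ids) + [self.eos_token_id]

_toy = _ToyMultilingualTokenizer()
_toy.tgt_lang = "it"
assert _toy.encode([100, 200]) == [9, 100, 200, 2]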
| 48 | 1 |
'''simple docstring'''
def merge_sort(collection: list) -> list:
    '''simple docstring'''
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
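# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The sort above repeatedly strips the current minimum and maximum into `start` and
# `end`; each min/max/remove pass costs O(n), so the whole sort is O(n^2). A
# hypothetical non-destructive variant that leaves the caller's list untouched:
def min_max_sort(items):
    remaining = list(items)  # work on a copy instead of mutating the argument
    start, end = [], []
    while len(remaining) > 1:
        smallest, largest = min(remaining), max(remaining)
        start.append(smallest)
        end.append(largest)
        remaining.remove(smallest)
        remaining.remove(largest)
    end.reverse()
    return start + remaining + end

assert min_max_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]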
| 48 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : int = "RegNetConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase__ : Tuple = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat"
UpperCAmelCase__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
lowerCAmelCase__ = ACTaFN[activation] if activation is not None else tf.identity
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) )
lowerCAmelCase__ = self.normalization(__magic_name__ )
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
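# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Why ZeroPadding2D(kernel_size // 2) followed by a VALID convolution reproduces "SAME"
# output sizes: out = (H + 2 * (k // 2) - k) // s + 1, which equals H for stride 1 and
# odd k, and ceil(H / s) for stride 2. A pure-Python check of that arithmetic:
def _conv_output_size(height, kernel_size, stride=1):
    padded = height + 2 * (kernel_size // 2)
    return (padded - kernel_size) // stride + 1

assert _conv_output_size(7, 3) == 7            # stride 1, odd kernel: size preserved
assert _conv_output_size(8, 3, stride=2) == 4  # stride 2: halved (ceil division)
assert _conv_output_size(7, 3, stride=2) == 4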
class A ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config.num_channels
lowerCAmelCase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = shape_list(__magic_name__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) )
lowerCAmelCase__ = self.embedder(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ )
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
lowerCAmelCase__ = [
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.pooler(__magic_name__ )
for layer_module in self.attention:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = hidden_state * pooled
return hidden_state
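# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The layer above is a squeeze-and-excitation gate: global-average-pool to a (B, 1, 1, C)
# summary, squeeze channels through a ReLU bottleneck, expand back with a sigmoid, and
# rescale the input channel-wise. A minimal functional sketch (hypothetical sizes;
# assumes TF >= 2.6 for the `keepdims` argument):
def _se_gate(features, reduced_channels):
    channels = features.shape[-1]
    pooled = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(features)      # (B, 1, 1, C)
    squeezed = tf.keras.layers.Conv2D(reduced_channels, 1, activation="relu")(pooled)
    scores = tf.keras.layers.Conv2D(channels, 1, activation="sigmoid")(squeezed)  # (B, 1, 1, C)
    return features * scores  # broadcasts the per-channel scores over H and W

# e.g. _se_gate(tf.random.normal((2, 7, 7, 64)), reduced_channels=16) keeps shape (2, 7, 7, 64)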
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__magic_name__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase__ = [
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ),
*[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ):
"""simple docstring"""
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ):
"""simple docstring"""
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(__magic_name__ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )
@keras_serializable
class A ( tf.keras.layers.Layer ):
snake_case__ :List[Any] = RegNetConfig
def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config
lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" )
lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
@unpack_inputs
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(__magic_name__ )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
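# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The main layer accepts NCHW pixel values (the PyTorch convention), runs the Keras convs
# in NHWC, then transposes outputs back to NCHW. The two permutations are inverses:
def _nchw_to_nhwc(x):
    return tf.transpose(x, perm=(0, 2, 3, 1))  # (B, C, H, W) -> (B, H, W, C)

def _nhwc_to_nchw(x):
    return tf.transpose(x, perm=(0, 3, 1, 2))  # (B, H, W, C) -> (B, C, H, W)

# _nhwc_to_nchw(_nchw_to_nhwc(x)) returns x for any 4-D tensor x.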
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = RegNetConfig
snake_case__ :Optional[Any] = 'regnet'
snake_case__ :Tuple = 'pixel_values'
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
# classification head
lowerCAmelCase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier[0](__magic_name__ )
lowerCAmelCase__ = self.classifier[1](__magic_name__ )
lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
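# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The classification head above is just Flatten -> Dense over the pooled features, with
# tf.identity standing in for the Dense layer when num_labels == 0. Hypothetical shapes:
# pooler_output (B, C, 1, 1) -> Flatten -> (B, C) -> Dense -> (B, num_labels) logits.
def _classification_head(pooled, num_labels):
    flat = tf.keras.layers.Flatten()(pooled)
    head = tf.keras.layers.Dense(num_labels) if num_labels > 0 else tf.identity
    return head(flat)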
| 48 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
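# --- Illustrative sketch (added for clarity, not part of the original test file) ---
# Dataclasses reject plain mutable defaults such as `[]` (ValueError at class creation),
# so the helper above wraps `field(default_factory=...)`. Note the lambda returns the
# *same* captured object on every call, so instances share the default list unless they
# rebind it:
@dataclass
class _ListFieldDemo:
    names: List[str] = list_field(default=["a", "b"])

assert _ListFieldDemo().names == ["a", "b"]
assert _ListFieldDemo().names is _ListFieldDemo().names  # shared default object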
@dataclass
class A :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'titi'
snake_case__ :Optional[int] = 'toto'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'titi'
snake_case__ :str = 'toto'
snake_case__ :int = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class A :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
lowerCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__magic_name__ , __magic_name__ )
            lowerCAmelCase__ = parser.parse_json_file(Path(temp_local_path + ".json" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
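# --- Illustrative sketch (added for clarity, not part of the original test file) ---
# The end-to-end flow the tests above exercise: a dataclass declares the CLI surface and
# HfArgumentParser turns argv into a populated instance. `_CliDemo` is a hypothetical
# example dataclass, not one used by the tests.
@dataclass
class _CliDemo:
    foo: int
    bar: float
    flag: bool = False

(_demo,) = HfArgumentParser(_CliDemo).parse_args_into_dataclasses(
    ["--foo", "12", "--bar", "3.14", "--flag"], look_for_args_file=False
)
assert _demo == _CliDemo(foo=12, bar=3.14, flag=True)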
| 48 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def get_device() -> str:
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def show_image(image) -> None:
    '''simple docstring'''
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
plt.show()
def get_timestamp() -> str:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
return timestamp
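# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Typical use of the freeze helper above: disable gradients on one sub-module so an
# optimizer only updates the rest (hypothetical two-layer model):
_model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 2))
freeze_params(_model[0])  # freeze the first layer only
assert all(not p.requires_grad for p in _model[0].parameters())
assert all(p.requires_grad for p in _model[1].parameters())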
| 48 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase__ : Union[str, Any] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase__ : Tuple = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
UpperCAmelCase__ : Optional[int] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive)
        output = sb_ter.corpus_score(predictions, transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
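# --- Illustrative sketch (added for clarity, not part of the original metric file) ---
# sacrebleu wants references grouped per reference *stream*, while this metric receives
# them grouped per prediction, so the list comprehension above is a plain transpose:
def _transpose_references(references):
    per_prediction = len(references[0])
    return [[refs[i] for refs in references] for i in range(per_prediction)]

assert _transpose_references([["r1a", "r1b"], ["r2a", "r2b"]]) == [
    ["r1a", "r2a"],
    ["r1b", "r2b"],
]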
| 48 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
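# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The structure above defers heavy imports: `_import_structure` maps submodule names to
# exported symbols, and `_LazyModule` only imports a submodule on first attribute access.
# A stripped-down stand-in for the idea (hypothetical, much simpler than the real class):
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)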
| 48 | 1 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend(line):
    '''simple docstring'''
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    '''simple docstring'''
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    '''simple docstring'''
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    '''simple docstring'''
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    '''simple docstring'''
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
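# --- Illustrative sketch (added for clarity, not part of the original script) ---
# What the templates expand to for a torch-backed class named "UNet2DModel":
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#
# Quick checks of the name-based dispatch (UPPER -> constant, lower -> function,
# otherwise -> class); assumes the `create_dummy_object` helper defined above:
assert create_dummy_object("SOME_CONSTANT", '["torch"]').startswith("\nSOME_CONSTANT = None")
assert "def some_function" in create_dummy_object("some_function", '["torch"]')
assert "class SomeClass(metaclass=DummyObject)" in create_dummy_object("SomeClass", '["torch"]')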
| 48 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    '''simple docstring'''
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 48 | 1 |
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 1_70)
cont_img.save("image_data/lena_high_contrast.png", format="png")
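# --- Illustrative worked example (added for clarity, not part of the original file) ---
# The standard contrast factor used above maps level 0 to factor 1 (no change) and grows
# steeply toward level 255; mid-gray (c = 128) is always a fixed point. For level = 170:
def _contrast_factor(level):
    return (259 * (level + 255)) / (255 * (259 - level))

assert abs(_contrast_factor(0) - 1.0) < 1e-9             # level 0 leaves the image unchanged
assert abs(_contrast_factor(170) - 4.85) < 0.01          # strong contrast boost
assert int(128 + _contrast_factor(170) * (150 - 128)) == 234  # 150 is pushed up to 234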
| 48 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ :Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :str = False
snake_case__ :Any = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ :int = False
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 48 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCAmelCase__ : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(SCREAMING_SNAKE_CASE__ )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'rag'
snake_case__ :int = True
def __init__( self : Dict , __magic_name__ : Tuple=None , __magic_name__ : Tuple=True , __magic_name__ : List[Any]=None , __magic_name__ : int=None , __magic_name__ : Optional[int]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : int=None , __magic_name__ : Dict=" / " , __magic_name__ : Optional[Any]=" // " , __magic_name__ : Dict=5 , __magic_name__ : Any=300 , __magic_name__ : int=768 , __magic_name__ : Any=8 , __magic_name__ : Optional[int]="wiki_dpr" , __magic_name__ : Union[str, Any]="train" , __magic_name__ : str="compressed" , __magic_name__ : List[str]=None , __magic_name__ : List[Any]=None , __magic_name__ : int=False , __magic_name__ : Dict=False , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=True , __magic_name__ : Optional[Any]=False , __magic_name__ : Union[str, Any]=False , __magic_name__ : Dict=False , __magic_name__ : List[str]=True , __magic_name__ : Dict=None , **__magic_name__ : List[str] , ):
"""simple docstring"""
super().__init__(
bos_token_id=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , prefix=__magic_name__ , vocab_size=__magic_name__ , **__magic_name__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowerCAmelCase__ = kwargs.pop("question_encoder" )
lowerCAmelCase__ = question_encoder_config.pop("model_type" )
lowerCAmelCase__ = kwargs.pop("generator" )
lowerCAmelCase__ = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
lowerCAmelCase__ = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = reduce_loss
lowerCAmelCase__ = label_smoothing
lowerCAmelCase__ = exclude_bos_score
lowerCAmelCase__ = do_marginalize
lowerCAmelCase__ = title_sep
lowerCAmelCase__ = doc_sep
lowerCAmelCase__ = n_docs
lowerCAmelCase__ = max_combined_length
lowerCAmelCase__ = dataset
lowerCAmelCase__ = dataset_split
lowerCAmelCase__ = index_name
lowerCAmelCase__ = retrieval_vector_size
lowerCAmelCase__ = retrieval_batch_size
lowerCAmelCase__ = passages_path
lowerCAmelCase__ = index_path
lowerCAmelCase__ = use_dummy_dataset
lowerCAmelCase__ = output_retrieved
lowerCAmelCase__ = do_deduplication
lowerCAmelCase__ = use_cache
if self.forced_eos_token_id is None:
lowerCAmelCase__ = getattr(self.generator , "forced_eos_token_id" , __magic_name__ )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Tuple , __magic_name__ : PretrainedConfig , __magic_name__ : PretrainedConfig , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ = self.question_encoder.to_dict()
lowerCAmelCase__ = self.generator.to_dict()
lowerCAmelCase__ = self.__class__.model_type
return output
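# Illustrative sketch of how this config is composed (upstream this class is
# `RagConfig` and the classmethod above is `from_question_encoder_generator_configs`;
# the sub-configs below are just one possible offline choice):
#
#     from transformers import BartConfig, DPRConfig, RagConfig
#
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         DPRConfig(), BartConfig(), n_docs=5
#     )
#     assert rag_config.generator.model_type == "bart"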
| 48 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'umt5'
snake_case__ :Any = ['past_key_values']
def __init__( self : List[Any] , __magic_name__ : Tuple=250112 , __magic_name__ : str=512 , __magic_name__ : int=64 , __magic_name__ : str=1024 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=6 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=1E-6 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Dict="gated-gelu" , __magic_name__ : List[str]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[int]="T5Tokenizer" , __magic_name__ : str=True , __magic_name__ : int=0 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str=0 , **__magic_name__ : Any , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = d_kv
lowerCAmelCase__ = d_ff
lowerCAmelCase__ = num_layers
lowerCAmelCase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = relative_attention_num_buckets
lowerCAmelCase__ = relative_attention_max_distance
lowerCAmelCase__ = dropout_rate
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_factor
lowerCAmelCase__ = feed_forward_proj
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = self.feed_forward_proj.split("-" )
lowerCAmelCase__ = act_info[-1]
lowerCAmelCase__ = act_info[0] == "gated"
if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. """
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
lowerCAmelCase__ = "gelu_new"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.d_model
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.num_heads
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.num_layers
class A ( SCREAMING_SNAKE_CASE__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCAmelCase__ = "past_encoder_sequence + sequence"
lowerCAmelCase__ = {0: "batch"}
lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 13
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 5E-4
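# Illustrative sketch of the activation parsing above (upstream this class is
# `UMT5Config`; instantiating a bare config downloads nothing):
#
#     from transformers import UMT5Config
#
#     config = UMT5Config(feed_forward_proj="gated-gelu")
#     assert config.dense_act_fn == "gelu_new" and config.is_gated_act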
| 48 | 1 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums`` (0 for an empty list)."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
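# Usage sketch: for [1, 2, 4, 5, 9, 10] the best non-adjacent picks are 2, 5 and 10.
#
#     >>> maximum_non_adjacent_sum([1, 2, 4, 5, 9, 10])
#     17
#     >>> maximum_non_adjacent_sum([])
#     0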
| 48 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, nodea: str, nodeb: str, probability: float) -> None:
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodeb not in self.connections:
            self.add_node(nodeb)
        self.connections[nodea][nodeb] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    """Run ``steps`` random transitions from ``start`` and count how often each node is visited."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodeb, probability in transitions:
        graph.add_transition_probability(nodea, nodeb, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
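# Usage sketch with made-up probabilities: a 1000-step walk over a two-node chain.
#
#     transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#     counts = get_transitions("a", transitions, 1000)
#     # counts is a Counter of visits per node; the stationary distribution of this
#     # chain is (5/6, 1/6), so the result is roughly Counter({"a": 833, "b": 167}).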
| 48 | 1 |
'''simple docstring'''
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = len(UpperCamelCase_ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
lowerCAmelCase__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase_ ):
return None
lowerCAmelCase__ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
lowerCAmelCase__ = left
lowerCAmelCase__ = point
elif point > right:
lowerCAmelCase__ = right
lowerCAmelCase__ = point
else:
if item < current_item:
lowerCAmelCase__ = point - 1
else:
lowerCAmelCase__ = point + 1
return None
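# Worked probe for the loop above: searching for 67 in [10, 30, 40, 45, 50, 66, 77, 93]
# first computes point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4; since
# sorted_collection[4] == 50 < 67, the window shrinks to indices 5..7, typically
# converging faster than plain bisection on uniformly distributed keys.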
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
lowerCAmelCase__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase_ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase_ , UpperCamelCase_ , point + 1 , UpperCamelCase_ )
def A ( UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
    if collection != sorted(collection):
raise ValueError("Collection must be ascending sorted" )
return True
if __name__ == "__main__":
import sys
UpperCAmelCase__ : Dict = 0
if debug == 1:
UpperCAmelCase__ : Tuple = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
UpperCAmelCase__ : Any = 67
UpperCAmelCase__ : Optional[Any] = interpolation_search(collection, target)
if result is not None:
print(F"{target} found at positions: {result}")
else:
print("Not found")
| 48 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase__ : Optional[Any] = pytest.mark.integration
UpperCAmelCase__ : str = {"comet"}
UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ : Optional[int] = {"code_eval"}
UpperCAmelCase__ : List[Any] = os.name == "nt"
UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
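# Illustrative sketch of registering another patcher (the backend dotted path is a
# made-up example; a real patch target must exist in the metric's module):
#
#     @LocalMetricTest.register_intensive_calls_patcher("my_metric")
#     def patch_my_metric(module_name):
#         # short-circuit the expensive model call with a canned score
#         with patch("my_metric_backend.expensive_call", return_value=0.5):
#             yield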
| 48 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = ['pixel_values']
def __init__( self : Union[str, Any] , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : int = 0.9 , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : Union[int, float] = 1 / 255 , __magic_name__ : bool = True , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = size if size is not None else {"shortest_edge": 224}
lowerCAmelCase__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCAmelCase__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCAmelCase__ = get_size_dict(__magic_name__ , param_name="crop_size" )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = crop_pct
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[float] = None , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCAmelCase__ = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCAmelCase__ = int(size["height"] / crop_pct )
else:
lowerCAmelCase__ = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(__magic_name__ ) )
lowerCAmelCase__ = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
else:
if "shortest_edge" in size:
lowerCAmelCase__ = get_resize_output_image_size(__magic_name__ , size=size["shortest_edge"] , default_to_square=__magic_name__ )
elif "height" in size and "width" in size:
lowerCAmelCase__ = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(__magic_name__ ) )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(__magic_name__ , size=(size["height"], size["width"]) , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : str , ):
"""simple docstring"""
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : int = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(__magic_name__ , param_name="crop_size" )
lowerCAmelCase__ = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
lowerCAmelCase__ = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCAmelCase__ = {"pixel_values": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
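# Illustrative sketch of the crop_pct path above (upstream this class is
# `PoolFormerImageProcessor`): with size={"shortest_edge": 224} and crop_pct=0.9
# the image is first resized so its short side is int(224 / 0.9) == 248, then
# center-cropped back to 224 x 224:
#
#     import numpy as np
#
#     processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
#     image = np.zeros((300, 400, 3), dtype=np.uint8)
#     batch = processor(images=image, return_tensors="np")
#     # batch["pixel_values"].shape -> (1, 3, 224, 224)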
| 48 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ : int = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'facebook/nllb-200-distilled-600M'
snake_case__ :Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ :List[Any] = 'translator'
snake_case__ :List[Any] = AutoTokenizer
snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM
snake_case__ :List[str] = LANGUAGE_CODES
snake_case__ :List[Any] = ['text', 'text', 'text']
snake_case__ :List[Any] = ['text']
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
lowerCAmelCase__ = self.lang_to_code[src_lang]
lowerCAmelCase__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
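# Illustrative sketch (upstream this tool is `TranslationTool`; running it for real
# downloads the NLLB checkpoint named above, and plain-English language names are
# mapped through LANGUAGE_CODES before reaching the tokenizer):
#
#     translator = TranslationTool()
#     print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))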
| 48 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "<pad>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium'
snake_case__ :Tuple = 'C\'est trop cool'
snake_case__ :List[str] = 'Esto es genial'
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
    def test_vocab_size(self):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
    def test_tokenizer_decode_ignores_language_codes(self):
"""simple docstring"""
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
"""simple docstring"""
lowerCAmelCase__ = "fr"
lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
    def test_tgt_lang_setter(self):
"""simple docstring"""
lowerCAmelCase__ = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCAmelCase__ = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 48 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 48 | 1 |
'''simple docstring'''
UpperCAmelCase__ : str = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
UpperCAmelCase__ : Any = ["a", "b", "c", "d", "e"]
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = start
# add current to visited
visited.append(UpperCamelCase_ )
lowerCAmelCase__ = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
lowerCAmelCase__ = topological_sort(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase_ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
for vertice in vertices:
if vertice not in visited:
lowerCAmelCase__ = topological_sort(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase__ : Dict = topological_sort("a", [], [])
print(sort)
| 48 |
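# Illustration (not part of the original snippet): the same graph ordered with
# Kahn's algorithm for comparison. Note the DFS version above emits a post-order
# (children before parents), so the two orders differ but both respect the edges.
from collections import deque


def kahn_topological_sort(graph: dict) -> list:
    # count incoming edges for every vertex
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    # repeatedly emit vertices with no remaining prerequisites
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for t in graph[v]:
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return order


assert kahn_topological_sort({"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []})[0] == "a"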
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """simple docstring"""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """simple docstring"""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 48 | 1 |
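# Illustration (not from the original file): a hedged sketch of how such a
# PipelineTool is typically driven. The image path is a hypothetical stand-in,
# and calling the tool is assumed to chain encode -> forward -> decode.
# from PIL import Image
#
# tool = ImageCaptioningTool()
# caption = tool(Image.open("cat.png"))
# print(caption)  # e.g. "a cat sitting on a couch"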
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"
@property
    def column_mapping(self) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
| 48 |
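# Illustration (not from the original file): a hedged sketch of what a column
# mapping like the one above is for, renaming arbitrary dataset columns to the
# schema a task expects. The example record and helper name are hypothetical.
def apply_column_mapping(example: dict, column_mapping: dict) -> dict:
    # old column name -> new (task-schema) column name
    return {new_name: example[old_name] for old_name, new_name in column_mapping.items()}


example = {"text": "long article ...", "summary": "short version"}
assert apply_column_mapping(example, {"text": "text", "summary": "summary"}) == example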
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        """simple docstring"""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seqaseq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """simple docstring"""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """simple docstring"""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
| 48 | 1 |
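# Illustration (not from the original file): the fairseq/spm alignment described in
# the comment table above, in miniature. spm reserves id 0 for '<unk>' while fairseq
# reserves ids 0-3 for '<s>'/'<pad>'/'</s>'/'<unk>', so every real spm piece shifts by +1.
# The toy vocabulary below is hypothetical.
FAIRSEQ_SPECIALS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1


def spm_piece_to_fairseq_id(piece: str, spm_piece_to_id: dict) -> int:
    if piece in FAIRSEQ_SPECIALS:
        return FAIRSEQ_SPECIALS[piece]
    spm_id = spm_piece_to_id.get(piece, 0)
    # spm id 0 means "unknown", which maps to fairseq's '<unk>' (id 3)
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_SPECIALS["<unk>"]


# ',' sits at spm position 3, so it lands at fairseq position 4, matching the table
assert spm_piece_to_fairseq_id(",", {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3}) == 4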
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
"""simple docstring"""
pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 48 |
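# Illustration (not from the original file): what `do_flip_channel_order` in the
# processor above refers to, swapping RGB to BGR on the channel axis. A minimal
# numpy sketch; the helper name is my own.
import numpy as np


def flip_channel_order(image: np.ndarray) -> np.ndarray:
    # expects channels-last (H, W, C); reverses the channel axis
    return image[..., ::-1]


rgb = np.zeros((2, 2, 3))
rgb[..., 0] = 1.0  # pure red
assert flip_channel_order(rgb)[..., 2].all()  # red ends up in the last (B) slot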
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
| 48 | 1 |
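# Illustration (not part of the original snippet): randomized quicksort performs
# O(n log n) comparisons on average. A quick empirical check against the counter
# returned above; it assumes `_in_place_quick_sort` from the snippet is in scope,
# and the threshold is a loose sanity bound, not a proof.
import numpy as np

data = np.random.normal(0, 1, 100)
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
assert all(data[i] <= data[i + 1] for i in range(len(data) - 1))  # sorted in place
assert comparisons < 100 * 100  # far below the worst-case n^2 in practice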
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    '''simple docstring'''
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 48 |
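# Illustration (not from the original file): the registration pattern above in
# miniature. Each command hangs a factory on its subparser via `set_defaults`,
# which is why `main` can dispatch through `args.func(args)`. All names here
# are hypothetical.
from argparse import ArgumentParser


class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("hello")
        parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")


demo_parser = ArgumentParser("demo")
HelloCommand.register_subcommand(demo_parser.add_subparsers())
demo_args = demo_parser.parse_args(["hello"])
demo_args.func(demo_args).run()  # prints "hello"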
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    '''simple docstring'''
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    '''simple docstring'''
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3_523, 6.3_629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1_873, 8.6_230]])
    else:
        raise ValueError(F"""Model name {model_name} not supported.""")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1E-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 48 | 1 |
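# Illustration (not from the original file): the fused-qkv split performed in
# convert_state_dict, in isolation. A fused projection stores q, k and v stacked
# along dim 0, so slicing by the hidden size recovers the three matrices. The
# tiny hidden size here is arbitrary.
import torch

hidden = 4
fused_qkv = torch.randn(3 * hidden, hidden)
q_proj = fused_qkv[:hidden, :]
k_proj = fused_qkv[hidden : 2 * hidden, :]
v_proj = fused_qkv[-hidden:, :]
assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), fused_qkv)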
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    '''simple docstring'''
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    '''simple docstring'''
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {}
lowerCAmelCase__ = []
lowerCAmelCase__ = 1
lowerCAmelCase__ = [1, 2]
lowerCAmelCase__ = {"a": 1, "b": 2}
lowerCAmelCase__ = {"a": [1, 2], "b": [3, 4]}
lowerCAmelCase__ = {"a": {"1": 1}, "b": 2}
lowerCAmelCase__ = {"a": 1, "b": 2, "c": 3, "d": 4}
lowerCAmelCase__ = {}
lowerCAmelCase__ = []
lowerCAmelCase__ = 2
lowerCAmelCase__ = [2, 3]
lowerCAmelCase__ = {"a": 2, "b": 3}
lowerCAmelCase__ = {"a": [2, 3], "b": [4, 5]}
lowerCAmelCase__ = {"a": {"1": 2}, "b": 3}
lowerCAmelCase__ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
lowerCAmelCase__ = 2
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
lowerCAmelCase__ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
lowerCAmelCase__ = {"a": 2, "b": 0, "c": 2}
lowerCAmelCase__ = {
"a": np.eye(2 ).astype(__magic_name__ ),
"b": np.zeros(3 ).astype(__magic_name__ ),
"c": np.ones(2 ).astype(__magic_name__ ),
}
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ) , __magic_name__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__magic_name__ ): # can't pickle a local lambda
map_nested(lambda __magic_name__ : x + 1 , __magic_name__ , num_proc=__magic_name__ )
    def test_zip_dict(self):
        """simple docstring"""
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip)
    def test_temporary_assignment(self):
        """simple docstring"""
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    '''simple docstring'''
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {F"""{i}""": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        """simple docstring"""
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        """simple docstring"""
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        """simple docstring"""
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def A ( UpperCamelCase_ : str ) -> str:
'''simple docstring'''
lowerCAmelCase__ = NestedDataStructure(UpperCamelCase_ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    '''simple docstring'''
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    '''simple docstring'''
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    '''simple docstring'''
    return text.split()


def _aseconds_generator_of_aitems_with_timing(content):
    '''simple docstring'''
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    '''simple docstring'''
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 48 |
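# Illustration (not from the original file): the behaviour the map_nested tests
# above exercise, reimplemented minimally (no multiprocessing, no numpy handling)
# just to make the contract explicit. A sketch, not datasets' implementation.
def mini_map_nested(fn, data):
    if isinstance(data, dict):
        return {k: mini_map_nested(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [mini_map_nested(fn, v) for v in data]
    return fn(data)


assert mini_map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}) == {"a": [2, 3], "b": {"c": 4}}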
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    '''simple docstring'''
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    '''simple docstring'''
    for number_to_partition in range(1, number_unique_partitions):
if len(partition(UpperCamelCase_ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 48 | 1 |
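# Illustration (not part of the original snippet): each multiset of primes summing
# to n maps to a unique product (unique factorization), so `partition` can count
# partitions as a set of products. For n = 10 the five prime partitions
# 2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7 and 5+5 give products 32, 36, 30, 21 and 25.
# This assumes `partition` from the snippet above is in scope.
assert partition(10) == {32, 36, 30, 21, 25}
assert len(partition(10)) == 5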
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_consistent(self):
        """simple docstring"""
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25E-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_stable_diffusion_panorama_views_batch(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_stable_diffusion_panorama_euler(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_stable_diffusion_panorama_pndm(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """simple docstring"""
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        """simple docstring"""
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.3696_8392,
                0.2702_5372,
                0.3244_6766,
                0.2837_9387,
                0.3636_3274,
                0.3073_3347,
                0.2710_0027,
                0.2705_4125,
                0.2553_6096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1E-2
    def test_stable_diffusion_panorama_k_lms(self):
        """simple docstring"""
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1E-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        """simple docstring"""
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.1868_1869,
                        0.3390_7816,
                        0.536_1276,
                        0.1443_2865,
                        -0.0285_6611,
                        -0.7394_1123,
                        0.2339_7987,
                        0.4732_2682,
                        -0.3782_3164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.1853_9645,
                        0.3398_7248,
                        0.537_8559,
                        0.1443_7142,
                        -0.0245_5261,
                        -0.733_8317,
                        0.2399_0755,
                        0.4735_6272,
                        -0.378_6505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 48 |
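# Illustration (not from the original file): the core idea behind the panorama
# pipeline tested above (MultiDiffusion-style) is denoising overlapping latent
# windows and averaging them. A hedged sketch of the window enumeration only;
# the window/stride defaults are assumptions.
def get_views(panorama_height: int, panorama_width: int, window: int = 64, stride: int = 8):
    views = []
    num_h = (panorama_height - window) // stride + 1 if panorama_height > window else 1
    num_w = (panorama_width - window) // stride + 1 if panorama_width > window else 1
    for i in range(num_h * num_w):
        h_start = (i // num_w) * stride
        w_start = (i % num_w) * stride
        views.append((h_start, h_start + window, w_start, w_start + window))
    return views


assert get_views(64, 64)[0] == (0, 64, 0, 64)  # a square latent yields a single full view
assert len(get_views(64, 256)) == 25  # a wide latent yields many overlapping views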
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int="[GO]" , __magic_name__ : Optional[Any]="[GO]" , __magic_name__ : List[str]="[s]" , __magic_name__ : str="[GO]" , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(
unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding="utf-8" ) as vocab_handle:
lowerCAmelCase__ = json.load(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.vocab )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Dict ):
"""simple docstring"""
char_tokens = []
for s in __magic_name__ :
char_tokens.extend(s )
return char_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Tuple ):
"""simple docstring"""
return self.decoder.get(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error("Vocabulary path ({}) should be a directory".format(__magic_name__ ) )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + "\n" )
return (vocab_file,)
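# Illustrative sketch (hypothetical vocab, not part of the original file): the
# tokenizer above is character-level, so encoding is a per-character lookup
# and decoding inverts it.
if __name__ == "__main__":
    _demo_vocab = {"[GO]": 0, "a": 1, "b": 2, "c": 3}
    _demo_decoder = {v: k for k, v in _demo_vocab.items()}
    _demo_ids = [_demo_vocab.get(ch, _demo_vocab["[GO]"]) for ch in "abc"]
    assert [_demo_decoder[i] for i in _demo_ids] == ["a", "b", "c"]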
| 48 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class A :
def __init__( self : str , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = data
lowerCAmelCase__ = None
class A :
def __init__( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def __iter__( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.head
while self.head:
yield node.data
lowerCAmelCase__ = node.next
if node == self.head:
break
def __len__( self : Tuple ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self : int ):
"""simple docstring"""
return "->".join(str(__magic_name__ ) for item in iter(self ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Any ):
"""simple docstring"""
self.insert_nth(len(self ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Any ):
"""simple docstring"""
self.insert_nth(0 , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Any ):
"""simple docstring"""
if index < 0 or index > len(self ):
raise IndexError("list index out of range." )
lowerCAmelCase__ = Node(__magic_name__ )
if self.head is None:
lowerCAmelCase__ = new_node # first node points itself
lowerCAmelCase__ = lowerCAmelCase__ = new_node
elif index == 0: # insert at head
lowerCAmelCase__ = self.head
lowerCAmelCase__ = lowerCAmelCase__ = new_node
else:
lowerCAmelCase__ = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ = temp.next
lowerCAmelCase__ = temp.next
lowerCAmelCase__ = new_node
if index == len(self ) - 1: # insert at tail
lowerCAmelCase__ = new_node
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return self.delete_nth(0 )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : int = 0 ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
lowerCAmelCase__ = self.head
if self.head == self.tail: # just one node
lowerCAmelCase__ = lowerCAmelCase__ = None
elif index == 0: # delete head node
lowerCAmelCase__ = self.tail.next.next
lowerCAmelCase__ = self.head.next
else:
lowerCAmelCase__ = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ = temp.next
lowerCAmelCase__ = temp.next
lowerCAmelCase__ = temp.next.next
if index == len(self ) - 1: # delete at tail
lowerCAmelCase__ = temp
return delete_node.data
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return len(self ) == 0
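# Invariant sketch: in the original formulation of this structure, a non-empty
# list always satisfies tail.next is head, which is why __iter__ above can
# stop as soon as it wraps back around to the head node.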
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = CircularLinkedList()
assert len(UpperCamelCase_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(UpperCamelCase_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(UpperCamelCase_ ) == i
circular_linked_list.insert_nth(UpperCamelCase_ , i + 1 )
assert str(UpperCamelCase_ ) == "->".join(str(UpperCamelCase_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(UpperCamelCase_ ) == "->".join(str(UpperCamelCase_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(UpperCamelCase_ ) == "->".join(str(UpperCamelCase_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(UpperCamelCase_ ) == "->".join(str(UpperCamelCase_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(UpperCamelCase_ ) == "->".join(str(UpperCamelCase_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
'''simple docstring'''
from math import sqrt
def A ( UpperCamelCase_ : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = 0
for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(UpperCamelCase_ ):
total += i + n // i
elif i == sqrt(UpperCamelCase_ ):
total += i
return total - n
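# Worked example (illustrative): 220 and 284 are the classic amicable pair, so
# the helper above maps each onto the other; for the default limit of 10_000,
# solution() below totals the amicable numbers 220, 284, 1184, 1210, 2620,
# 2924, 5020, 5564, 6232 and 6368, i.e. 31626.
if __name__ == "__main__":
    assert sum(i for i in range(1, 220) if 220 % i == 0) == 284
    assert sum(i for i in range(1, 284) if 284 % i == 0) == 220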
def A ( UpperCamelCase_ : int = 1_00_00 ) -> int:
'''simple docstring'''
lowerCAmelCase__ = sum(
i
for i in range(1 , UpperCamelCase_ )
if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 48 | 1 |
'''simple docstring'''
import torch
from transformers import AutoModel
class A ( torch.nn.Module ):
def __init__( self : str , __magic_name__ : List[Any]="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__magic_name__ , self ).__init__()
lowerCAmelCase__ = AutoModel.from_pretrained(__magic_name__ , return_dict=__magic_name__ )
lowerCAmelCase__ = torch.nn.CosineSimilarity(3 , 1E-08 )
lowerCAmelCase__ = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
return self.bert(**__magic_name__ ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[int]=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__magic_name__ , __magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = W_supports["sizes"].tolist()
lowerCAmelCase__ = W_supports["start_token_id"].item()
lowerCAmelCase__ = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowerCAmelCase__ = self.BERT(**__magic_name__ )
lowerCAmelCase__ = self.BERT(**__magic_name__ )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = W_supports["input_ids"] == start_token_id
lowerCAmelCase__ = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__magic_name__ ):
if i == 0:
lowerCAmelCase__ = 0
else:
lowerCAmelCase__ = support_sizes[i - 1]
lowerCAmelCase__ = S[s : s + size][start_token_masks[s : s + size]]
lowerCAmelCase__ = S[s : s + size][end_token_masks[s : s + size]]
lowerCAmelCase__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowerCAmelCase__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowerCAmelCase__ = torch.vstack((p_starts, p_start) )
lowerCAmelCase__ = torch.vstack((p_ends, p_end) )
else:
lowerCAmelCase__ = p_start
lowerCAmelCase__ = p_end
return p_starts, p_ends
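# Shape sketch (an assumption about the intended inputs, not verified against
# the upstream FSNER repo): W_query holds the tokenized query sentences and
# W_supports the tokenized support sentences plus the bookkeeping entries
# popped above ("sizes", "start_token_id", "end_token_id"); the returned
# p_starts/p_ends stack one row of per-token span probabilities per query.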
| 48 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.mean(1 )
# Centralize the data of class i
lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCamelCase_ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = features.mean(1 )
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.shape[1]
lowerCAmelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
lowerCAmelCase__ = features.mean(1 )
# Center the dataset
lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) )
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1]
lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ )
# Take all the columns in the reverse order (-1), and then takes only the first
lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
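# Self-contained sketch of the same projection math as above (illustrative
# data): center the features, eigendecompose the covariance, keep the two
# leading components.
if __name__ == "__main__":
    _demo = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]])
    _centered = _demo - _demo.mean(1, keepdims=True)
    _vals, _vecs = np.linalg.eigh(_centered @ _centered.T / _demo.shape[1])
    _proj = _vecs[:, ::-1][:, :2].T @ _centered
    assert _proj.shape == (2, 4)  # (dimensions, samples)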
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any():
lowerCAmelCase__ ,lowerCAmelCase__ = eigh(
covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , )
lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions]
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ )
lowerCAmelCase__ = svd_matrix[:, 0:dimensions]
lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = linear_discriminant_analysis(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ )
if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 1 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCAmelCase__ : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
| 48 |
'''simple docstring'''
def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> list:
'''simple docstring'''
lowerCAmelCase__ = word.split()
def justify(UpperCamelCase_ : list , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> str:
lowerCAmelCase__ = max_width - width
lowerCAmelCase__ = len(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ = []
for i in range(UpperCamelCase_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
for word in words:
if width + len(UpperCamelCase_ ) + len(UpperCamelCase_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase_ )
width += len(UpperCamelCase_ )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
# reset new line and new width
lowerCAmelCase__ ,lowerCAmelCase__ = [word], len(UpperCamelCase_ )
lowerCAmelCase__ = max_width - width - len(UpperCamelCase_ )
answer.append(" ".join(UpperCamelCase_ ) + (remaining_spaces + 1) * " " )
return answer
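# Worked example (the classic instance of this problem): with max_width=16,
# "This is an example of text justification." is laid out as
#   ["This    is    an", "example  of text", "justification.  "]
# where interior lines are fully justified and the last line is left-justified.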
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48 | 1 |
'''simple docstring'''
from itertools import permutations
def A ( UpperCamelCase_ : tuple ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCAmelCase__ = [7, 11, 13, 17]
for i, test in enumerate(UpperCamelCase_ ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
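# Worked check (the example from the Project Euler 43 statement): for
# d1..d10 = 1406357289, d2d3d4=406 is divisible by 2, d3d4d5=063 by 3,
# d4d5d6=635 by 5, d5d6d7=357 by 7, d6d7d8=572 by 11, d7d8d9=728 by 13 and
# d8d9d10=289 by 17, so (1, 4, 0, 6, 3, 5, 7, 2, 8, 9) passes every test above.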
def A ( UpperCamelCase_ : int = 10 ) -> int:
'''simple docstring'''
return sum(
int("".join(map(UpperCamelCase_ , UpperCamelCase_ ) ) )
for num in permutations(range(UpperCamelCase_ ) )
if is_substring_divisible(UpperCamelCase_ ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 48 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ : str = sys.version_info >= (3, 10)
def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
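# Minimal usage sketch: the helper above wraps dataclasses.field so that list
# defaults are created per instance (avoiding the shared-mutable-default trap),
# e.g. `names: List[str] = list_field(default=["a", "b"])` inside a @dataclass.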
@dataclass
class A :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'titi'
snake_case__ :Optional[int] = 'toto'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'titi'
snake_case__ :str = 'toto'
snake_case__ :int = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class A :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
lowerCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 48 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
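# e.g. with the defaults above (block_sizes=[1, 1, 2], num_decoder_layers=1)
# the full (non-base) model has sum([1, 1, 2]) + 1 = 5 layers and therefore
# reports 5 + 2 = 7 hidden states, per the comment above.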
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ :Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :str = False
snake_case__ :Any = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ :int = False
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 48 |
'''simple docstring'''
import sys
from collections import defaultdict
class A :
def __init__( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = []
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[Any] ):
"""simple docstring"""
return self.node_position[vertex]
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = pos
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowerCAmelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowerCAmelCase__ = 2 * start + 1
else:
lowerCAmelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowerCAmelCase__ ,lowerCAmelCase__ = heap[smallest_child], positions[smallest_child]
lowerCAmelCase__ ,lowerCAmelCase__ = heap[start], positions[start]
lowerCAmelCase__ ,lowerCAmelCase__ = temp, tempa
lowerCAmelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __magic_name__ )
self.top_to_bottom(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = position[index]
while index != 0:
lowerCAmelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowerCAmelCase__ = heap[parent]
lowerCAmelCase__ = position[parent]
self.set_position(position[parent] , __magic_name__ )
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , __magic_name__ )
break
lowerCAmelCase__ = parent
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , 0 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = len(__magic_name__ ) // 2 - 1
for i in range(__magic_name__ , -1 , -1 ):
self.top_to_bottom(__magic_name__ , __magic_name__ , len(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = positions[0]
lowerCAmelCase__ = sys.maxsize
self.top_to_bottom(__magic_name__ , 0 , len(__magic_name__ ) , __magic_name__ )
return temp
def A ( UpperCamelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = Heap()
lowerCAmelCase__ = [0] * len(UpperCamelCase_ )
lowerCAmelCase__ = [-1] * len(UpperCamelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowerCAmelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
lowerCAmelCase__ = []
for vertex in range(len(UpperCamelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase_ )
heap.node_position.append(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = 1
lowerCAmelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowerCAmelCase__ = 0
lowerCAmelCase__ = distance
heap.heapify(UpperCamelCase_ , UpperCamelCase_ )
for _ in range(1 , len(UpperCamelCase_ ) ):
lowerCAmelCase__ = heap.delete_minimum(UpperCamelCase_ , UpperCamelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowerCAmelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_ )]
):
lowerCAmelCase__ = distance
heap.bottom_to_top(
UpperCamelCase_ , heap.get_position(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = vertex
return tree_edges
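# Worked example (illustrative): for the weighted triangle
#   0 --1-- 1, 1 --2-- 2, 0 --3-- 2
# i.e. adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[1, 2], [0, 3]]},
# Prim's algorithm grows from vertex 0 and returns [(0, 1), (1, 2)], the
# minimum spanning tree of total weight 3.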
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ : Optional[int] = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ : str = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ : int = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 48 | 1 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A ( UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ = TextDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ ).read()
_check_text_dataset(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = {"text": "string"}
lowerCAmelCase__ = features.copy() if features else default_expected_features
lowerCAmelCase__ = (
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ = TextDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_text_dataset(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A ( UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any ) -> str:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = {"text": "string"}
lowerCAmelCase__ = TextDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ , split=UpperCamelCase_ ).read()
_check_text_dataset(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
if issubclass(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase__ = text_path
elif issubclass(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase__ = [text_path]
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = {"text": "string"}
lowerCAmelCase__ = TextDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_text_dataset(UpperCamelCase_ , UpperCamelCase_ )
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any]=("train",) ) -> int:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
for split in splits:
lowerCAmelCase__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ = TextDatasetReader({"train": text_path} , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ ).read()
_check_text_datasetdict(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / "cache"
# the text loader always produces a single "text" column of dtype "string" by default
lowerCAmelCase__ = {"text": "string"}
lowerCAmelCase__ = features.copy() if features else default_expected_features
lowerCAmelCase__ = (
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ = TextDatasetReader({"train": text_path} , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_text_datasetdict(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ) -> List[str]:
'''simple docstring'''
if split:
lowerCAmelCase__ = {split: text_path}
else:
lowerCAmelCase__ = "train"
lowerCAmelCase__ = {"train": text_path, "test": text_path}
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = {"text": "string"}
lowerCAmelCase__ = TextDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_text_datasetdict(UpperCamelCase_ , UpperCamelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
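# Usage sketch (assuming the public datasets API): TextDatasetReader is what
# backs `load_dataset("text", ...)`, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "my.txt"})["train"]
# which yields one {"text": <line>} row per line of the input file.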
| 48 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB )
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(spm_model ) )]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id ( self : str ):
        """simple docstring"""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab ( self : Optional[Any] ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1001 )
    def test_vocab_size ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
    def test_full_tokenizer ( self : Union[str, Any] ):
        """simple docstring"""
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [289, 50, 14, 174, 386] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
    def test_tokenizer_integration ( self : Any ):
        """simple docstring"""
        # fmt: off
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'
    french_text = 'C\'est trop cool'
    spanish_text = 'Esto es genial'
@classmethod
    def setUpClass ( cls : List[Any] ):
        """simple docstring"""
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls
    def test_language_codes ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
    def test_vocab_size ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
    def test_tokenizer_decode_ignores_language_codes ( self : Union[str, Any] ):
        """simple docstring"""
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids )
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_spanish )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_tokenizer_adds_special_tokens ( self : Optional[int] ):
        """simple docstring"""
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , FR_CODE )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
    def test_tgt_lang_setter ( self : List[Any] ):
        """simple docstring"""
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 48 | 1 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
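# Wraps a (streaming) text dataset and yields fixed-size blocks of token ids: raw
# examples are buffered until roughly `chars_per_token * seq_length * num_of_sequences`
# characters are collected, tokenized in one batch, joined with a BOS separator,
# and sliced into `seq_length`-sized tensors.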
class A ( IterableDataset ):
    def __init__( self : Optional[int] , tokenizer : int , dataset : List[str] , seq_length : Union[str, Any]=1024 , num_of_sequences : Union[str, Any]=1024 , chars_per_token : Union[str, Any]=3.6 ):
        """simple docstring"""
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
def __iter__( self : int ):
"""simple docstring"""
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )["content"] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
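# Helpers for the evaluation script below: `create_dataloader` streams the
# validation split and batches the fixed-length blocks; `evaluate` averages the
# gathered cross-entropy losses and reports perplexity.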
def create_dataloader ( args : str ) -> int:
    '''simple docstring'''
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name , split="train" , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate ( args : Tuple ) -> str:
    '''simple docstring'''
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float("inf" )
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(F"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 48 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 10_88, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
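# The layers below port the RegNet architecture to Keras: a convolutional stem,
# X/Y residual blocks (the Y variant adds squeeze-and-excitation), stages that
# stack those blocks, and an encoder that chains the stages.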
class TFRegNetConvLayer ( tf.keras.layers.Layer ):
    def __init__( self : str , out_channels : int , kernel_size : int = 3 , stride : int = 1 , groups : int = 1 , activation : Optional[str] = "relu" , **kwargs : int ):
        """simple docstring"""
        super().__init__(**kwargs )
        # The padding and conv have been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding="VALID" , groups=groups , use_bias=False , name="convolution" , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
        self.activation = ACT2FN[activation] if activation is not None else tf.identity
    def call ( self : Any , hidden_state : str ):
        """simple docstring"""
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetEmbeddings ( tf.keras.layers.Layer ):
    def __init__( self : List[Any] , config : RegNetConfig , **kwargs : str ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
    def call ( self : Dict , pixel_values : List[Any] ):
        """simple docstring"""
        num_channels = shape_list(pixel_values )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values , perm=(0, 2, 3, 1) )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class TFRegNetShortCut ( tf.keras.layers.Layer ):
    def __init__( self : Any , out_channels : int , stride : int = 2 , **kwargs : Optional[Any] ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name="convolution" )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
    def call ( self : str , inputs : tf.Tensor , training : bool = False ):
        """simple docstring"""
        return self.normalization(self.convolution(inputs ) , training=training )
class TFRegNetSELayer ( tf.keras.layers.Layer ):
    def __init__( self : Union[str, Any] , in_channels : int , reduced_channels : int , **kwargs : List[Any] ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="pooler" )
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels , kernel_size=1 , activation="relu" , name="attention.0" ),
            tf.keras.layers.Conv2D(filters=in_channels , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
        ]
    def call ( self : List[Any] , hidden_state : Union[str, Any] ):
        """simple docstring"""
        pooled = self.pooler(hidden_state )
        for layer_module in self.attention:
            pooled = layer_module(pooled )
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer ( tf.keras.layers.Layer ):
    def __init__( self : int , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 1 , **kwargs : str ):
        """simple docstring"""
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="layer.1" ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="layer.2" ),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call ( self : Dict , hidden_state : Any ):
        """simple docstring"""
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetYLayer ( tf.keras.layers.Layer ):
    def __init__( self : int , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 1 , **kwargs : str ):
        """simple docstring"""
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="layer.1" ),
            TFRegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="layer.3" ),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call ( self : Optional[int] , hidden_state : Any ):
        """simple docstring"""
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetStage ( tf.keras.layers.Layer ):
    def __init__( self : Union[str, Any] , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 2 , depth : int = 2 , **kwargs : Optional[int] ):
        """simple docstring"""
        super().__init__(**kwargs )
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config , in_channels , out_channels , stride=stride , name="layers.0" ),
            *[layer(config , out_channels , out_channels , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
        ]
    def call ( self : str , hidden_state : List[str] ):
        """simple docstring"""
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state
class TFRegNetEncoder ( tf.keras.layers.Layer ):
    def __init__( self : Tuple , config : RegNetConfig , **kwargs : Union[str, Any] ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=f"""stages.{i+1}""" ) )
    def call ( self : List[str] , hidden_state : tf.Tensor , output_hidden_states : bool = False , return_dict : bool = True ):
        """simple docstring"""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
@keras_serializable
class TFRegNetMainLayer ( tf.keras.layers.Layer ):
    config_class = RegNetConfig
    def __init__( self : str , config : Union[str, Any] , **kwargs : Union[str, Any] ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config , name="embedder" )
        self.encoder = TFRegNetEncoder(config , name="encoder" )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="pooler" )
    @unpack_inputs
    def call ( self : List[Any] , pixel_values : tf.Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , training : bool = False , ):
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values , training=training )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
        pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel ( TFPreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    @property
    def input_signature ( self : Union[str, Any] ):
        """simple docstring"""
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.float32 )}
REGNET_START_DOCSTRING = R"\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
REGNET_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , REGNET_START_DOCSTRING , )
class TFRegNetModel ( TFRegNetPreTrainedModel ):
    def __init__( self : Any , config : RegNetConfig , *inputs : Optional[int] , **kwargs : Union[str, Any] ):
        """simple docstring"""
        super().__init__(config , *inputs , **kwargs )
        self.regnet = TFRegNetMainLayer(config , name="regnet" )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def call ( self : str , pixel_values : tf.Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , training : bool=False , ):
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification ( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
    def __init__( self : Tuple , config : RegNetConfig , *inputs : Tuple , **kwargs : Optional[int] ):
        """simple docstring"""
        super().__init__(config , *inputs , **kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config , name="regnet" )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def call ( self : int , pixel_values : tf.Tensor = None , labels : tf.Tensor = None , output_hidden_states : bool = None , return_dict : bool = None , training : bool=False , ):
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier[0](pooled_output )
        logits = self.classifier[1](logits )
        loss = None if labels is None else self.hf_compute_loss(labels=labels , logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 48 | 1 |
'''simple docstring'''
def twos_complement ( number : int ) -> str:
    '''
    Return the two's complement binary string of a non-positive integer.
    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    '''
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
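# Small helper utilities: freeze a module's parameters, pick an available torch
# device, display an image with matplotlib, and format a timestamp for logging.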
def A ( module : Tuple ) -> int:
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def A ( ) -> Tuple:
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def A ( img : Optional[int] ) -> Optional[Any]:
    '''simple docstring'''
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def A ( ) -> Union[str, Any]:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
| 48 | 1 |
'''simple docstring'''
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
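# Crawler flow: parse anchor tags from the page at `url`, resolve each link
# against the page's domain, fetch every link, and collect e-mail addresses
# matching that domain.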
class Parser ( HTMLParser ):
    def __init__( self : Any , domain : str ):
        """simple docstring"""
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag ( self : int , tag : str , attrs : list[tuple[str, str | None]] ):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
                    url = parse.urljoin(self.domain , value )
                    self.urls.append(url )
def get_domain_name ( url : str ) -> str:
    '''Return the main domain of the given url, e.g. "github.com".'''
    return ".".join(get_sub_domain_name(url ).split("." )[-2:] )
def get_sub_domain_name ( url : str ) -> str:
    '''Return the full network location of the given url.'''
    return parse.urlparse(url ).netloc
def emails_from_url ( url : str = "https://github.com" ) -> list[str]:
    '''Crawl `url`, follow its links, and return the e-mail addresses found for its domain.'''
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
UpperCAmelCase__ : Dict = emails_from_url("https://github.com")
print(F"{len(emails)} emails found:")
print("\n".join(sorted(emails)))
| 48 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
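# Lazy import pattern used across transformers: `_import_structure` maps each
# submodule to its public names, and the heavy backends (torch/tf/flax) are only
# registered when the corresponding dependency is importable.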
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 1 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 48 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
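# Conversion entry point: build a BigBird config, instantiate the matching
# PyTorch model (QA head for TriviaQA checkpoints, pretraining head otherwise),
# load the TF weights into it, and save the result.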
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path : Optional[int] , big_bird_config_file : Dict , pytorch_dump_path : Dict , is_trivia_qa : int ) -> Any:
    '''simple docstring'''
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 48 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCAmelCase__ : Any = logging.get_logger(__name__)
class YolosFeatureExtractor ( YolosImageProcessor ):
    def __init__( self : int , *args : Any , **kwargs : Any ):
        """simple docstring"""
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 48 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
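# The model tester below builds a small random config/input set and checks output
# shapes for every task head; the two test classes that follow split the model
# list between the full (decoder) models and the base (encoder-only) models.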
class TFFunnelModelTester :
    def __init__( self : List[Any] , parent : Optional[Any] , batch_size : str=13 , seq_length : List[str]=7 , is_training : Tuple=True , use_input_mask : Tuple=True , use_token_type_ids : str=True , use_labels : int=True , vocab_size : int=99 , block_sizes : List[str]=[1, 1, 2] , num_decoder_layers : Dict=1 , d_model : Tuple=32 , n_head : Any=4 , d_head : Tuple=8 , d_inner : Optional[Any]=37 , hidden_act : Tuple="gelu_new" , hidden_dropout : Union[str, Any]=0.1 , attention_dropout : List[str]=0.1 , activation_dropout : Tuple=0.0 , max_position_embeddings : int=512 , type_vocab_size : Optional[int]=3 , initializer_std : List[str]=0.02 , num_labels : Dict=3 , num_choices : List[Any]=4 , scope : Any=None , base : Dict=False , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs ( self : List[Any] ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model ( self : Optional[Any] , config : Optional[int] , input_ids : Optional[int] , token_type_ids : Union[str, Any] , input_mask : Any , sequence_labels : List[Any] , token_labels : Optional[int] , choice_labels : str , ):
        """simple docstring"""
        model = TFFunnelModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model ( self : Union[str, Any] , config : str , input_ids : Any , token_type_ids : List[Any] , input_mask : Tuple , sequence_labels : List[Any] , token_labels : int , choice_labels : int , ):
        """simple docstring"""
        model = TFFunnelBaseModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def create_and_check_for_pretraining ( self : str , config : Any , input_ids : Union[str, Any] , token_type_ids : Dict , input_mask : List[Any] , sequence_labels : str , token_labels : Optional[Any] , choice_labels : List[str] , ):
        """simple docstring"""
        model = TFFunnelForPreTraining(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_masked_lm ( self : str , config : int , input_ids : Union[str, Any] , token_type_ids : Optional[Any] , input_mask : Dict , sequence_labels : Dict , token_labels : Dict , choice_labels : Dict , ):
        """simple docstring"""
        model = TFFunnelForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification ( self : Optional[int] , config : List[Any] , input_ids : Optional[Any] , token_type_ids : Any , input_mask : Tuple , sequence_labels : List[Any] , token_labels : List[Any] , choice_labels : Any , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice ( self : Tuple , config : Optional[Any] , input_ids : Optional[Any] , token_type_ids : Optional[Any] , input_mask : Any , sequence_labels : Any , token_labels : List[str] , choice_labels : List[str] , ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification ( self : Tuple , config : Dict , input_ids : Any , token_type_ids : Union[str, Any] , input_mask : int , sequence_labels : int , token_labels : Optional[int] , choice_labels : str , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering ( self : Union[str, Any] , config : Tuple , input_ids : Optional[Any] , token_type_ids : str , input_mask : Dict , sequence_labels : Optional[int] , token_labels : str , choice_labels : List[str] , ):
        """simple docstring"""
        model = TFFunnelForQuestionAnswering(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common ( self : Union[str, Any] ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFFunnelModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp ( self : Union[str, Any] ):
        """simple docstring"""
        self.model_tester = TFFunnelModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model ( self : int ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining ( self : Optional[int] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm ( self : Tuple ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification ( self : Any ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering ( self : Tuple ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class TFFunnelBaseModelTest ( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp ( self : str ):
        """simple docstring"""
        self.model_tester = TFFunnelModelTester(self , base=True )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_base_model ( self : Dict ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs )
    def test_for_sequence_classification ( self : Union[str, Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice ( self : Optional[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
| 48 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
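# FNet replaces self-attention with Fourier-transform token mixing, so the config
# carries no attention-head fields; the TPU flags toggle the DFT optimizations
# used for short sequences on TPU.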
class FNetConfig ( PretrainedConfig ):
    model_type = 'fnet'
    def __init__( self : Optional[Any] , vocab_size : Tuple=32000 , hidden_size : str=768 , num_hidden_layers : int=12 , intermediate_size : Any=3072 , hidden_act : str="gelu_new" , hidden_dropout_prob : Dict=0.1 , max_position_embeddings : int=512 , type_vocab_size : int=4 , initializer_range : List[Any]=0.02 , layer_norm_eps : Tuple=1E-12 , use_tpu_fourier_optimizations : List[str]=False , tpu_short_seq_length : Union[str, Any]=512 , pad_token_id : str=3 , bos_token_id : Tuple=1 , eos_token_id : Union[str, Any]=2 , **kwargs : Tuple ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 48 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
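# UMT5 shares the T5 architecture, so the ONNX config at the bottom mirrors the
# T5 one: encoder/decoder sequence axes plus past key values when caching is on.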
class UMT5Config ( PretrainedConfig ):
    model_type = 'umt5'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__( self : List[Any] , vocab_size : Tuple=250112 , d_model : str=512 , d_kv : int=64 , d_ff : str=1024 , num_layers : Tuple=8 , num_decoder_layers : Optional[int]=None , num_heads : Optional[Any]=6 , relative_attention_num_buckets : Dict=32 , relative_attention_max_distance : Optional[Any]=128 , dropout_rate : Union[str, Any]=0.1 , layer_norm_epsilon : int=1E-6 , initializer_factor : Optional[int]=1.0 , feed_forward_proj : Dict="gated-gelu" , is_encoder_decoder : List[str]=True , use_cache : Tuple=True , tokenizer_class : Optional[int]="T5Tokenizer" , tie_word_embeddings : str=True , pad_token_id : int=0 , eos_token_id : Union[str, Any]=1 , decoder_start_token_id : str=0 , **kwargs : Any ):
        """simple docstring"""
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size ( self : Optional[int] ):
        """simple docstring"""
        return self.d_model
    @property
    def num_attention_heads ( self : Union[str, Any] ):
        """simple docstring"""
        return self.num_heads
    @property
    def num_hidden_layers ( self : Dict ):
        """simple docstring"""
        return self.num_layers
class UMT5OnnxConfig ( OnnxSeq2SeqConfigWithPast ):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs ( self : Dict ):
        """simple docstring"""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
        return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 13
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 5E-4
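# Illustrative note (added; not part of the original file): the `feed_forward_proj`
# parsing in __init__ splits on "-", so
#   "gated-gelu" -> dense activation "gelu" (remapped to "gelu_new"), gated
#   "relu"       -> dense activation "relu", not gated
# while strings such as "silu-gelu" or "gated-gelu-x" raise the ValueError above.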
| 48 | 1 |
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the test function below
import struct
class A :
def __init__( self : Optional[Any] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = data
lowerCAmelCase__ = [0X6745_2301, 0XEFCD_AB89, 0X98BA_DCFE, 0X1032_5476, 0XC3D2_E1F0]
@staticmethod
def __SCREAMING_SNAKE_CASE ( __magic_name__ : Optional[int] , __magic_name__ : str ):
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XFFFF_FFFF
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = B"\x80" + B"\x00" * (63 - (len(self.data ) + 8) % 64)
lowerCAmelCase__ = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
return padded_data
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(struct.unpack(">16L" , __magic_name__ ) ) + [0] * 64
for i in range(16 , 80 ):
lowerCAmelCase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.padding()
lowerCAmelCase__ = self.split_blocks()
for block in self.blocks:
lowerCAmelCase__ = self.expand_block(__magic_name__ )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
lowerCAmelCase__ = (b & c) | ((~b) & d)
lowerCAmelCase__ = 0X5A82_7999
elif 20 <= i < 40:
lowerCAmelCase__ = b ^ c ^ d
lowerCAmelCase__ = 0X6ED9_EBA1
elif 40 <= i < 60:
lowerCAmelCase__ = (b & c) | (b & d) | (c & d)
lowerCAmelCase__ = 0X8F1B_BCDC
elif 60 <= i < 80:
lowerCAmelCase__ = b ^ c ^ d
lowerCAmelCase__ = 0XCA62_C1D6
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = (
self.rotate(__magic_name__ , 5 ) + f + e + k + expanded_block[i] & 0XFFFF_FFFF,
a,
self.rotate(__magic_name__ , 30 ),
c,
d,
)
lowerCAmelCase__ = (
self.h[0] + a & 0XFFFF_FFFF,
self.h[1] + b & 0XFFFF_FFFF,
self.h[2] + c & 0XFFFF_FFFF,
self.h[3] + d & 0XFFFF_FFFF,
self.h[4] + e & 0XFFFF_FFFF,
)
return ("{:08x}" * 5).format(*self.h )
def A ( ) -> int:
'''simple docstring'''
lowerCAmelCase__ = B"Test String"
assert SHAaHash(UpperCamelCase_ ).final_hash() == hashlib.shaa(UpperCamelCase_ ).hexdigest() # noqa: S324
def A ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = argparse.ArgumentParser(description="Process some strings or files" )
parser.add_argument(
"--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.input_string
# In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
lowerCAmelCase__ = f.read()
else:
lowerCAmelCase__ = bytes(UpperCamelCase_ , "utf-8" )
print(SHAaHash(UpperCamelCase_ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
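# Sanity-check sketch (added for illustration; the class above is the obfuscated
# SHA-1 implementation): for the classic FIPS test vector b"abc" the digest is
#   "a9993e364706816aba3e25717850c26c9cd0d89d"
# which is what `final_hash()` should return, in agreement with hashlib's sha1.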
| 48 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class A :
def __init__( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__magic_name__ )
if nodea not in self.connections:
self.add_node(__magic_name__ )
lowerCAmelCase__ = probability
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return list(self.connections )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]:
'''simple docstring'''
lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = Counter(graph.get_nodes() )
lowerCAmelCase__ = start
for _ in range(UpperCamelCase_ ):
lowerCAmelCase__ = graph.transition(UpperCamelCase_ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
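# Hedged usage sketch (added; names follow the un-obfuscated original, where the
# module-level function is `get_transitions`): for a two-state chain
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1),
#                  ("b", "a", 0.5), ("b", "b", 0.5)]
#   visited = get_transitions("a", transitions, 1000)
# `visited` is a Counter whose relative counts approach the stationary
# distribution (here roughly 5/6 for "a" and 1/6 for "b") as the step count grows.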
| 48 | 1 |
'''simple docstring'''
def A ( UpperCamelCase_ : list[int] ) -> list[int]:
'''simple docstring'''
lowerCAmelCase__ = len(UpperCamelCase_ )
for i in range(UpperCamelCase_ ):
for j in range(i + 1 , UpperCamelCase_ ):
if numbers[j] < numbers[i]:
lowerCAmelCase__ ,lowerCAmelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ : str = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
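# Hedged note (added): the function above is the classic exchange sort, an
# O(n^2) comparison sort; e.g. exchange_sort([5, 4, 3, 2, 1]) returns
# [1, 2, 3, 4, 5] (using the original, un-obfuscated function name).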
| 48 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase__ : Optional[Any] = pytest.mark.integration
UpperCAmelCase__ : str = {"comet"}
UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ : Optional[int] = {"code_eval"}
UpperCAmelCase__ : List[Any] = os.name == "nt"
UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
# mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
# mock download_model which is supposed to download a comet checkpoint
# mock load_from_checkpoint which is supposed to load the downloaded checkpoint
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
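# Hedged extension sketch (added; `my_metric` and the patched target are
# hypothetical names): a new intensive-call patcher would be registered as
#
# @LocalMetricTest.register_intensive_calls_patcher("my_metric")
# def patch_my_metric(module_name):
#     with patch("my_metric.expensive_forward", return_value=0.0):
#         yield
#
# mirroring the bleurt/bertscore/comet patchers above; the registering wrapper
# turns the generator into a contextmanager before storing it.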
| 48 | 1 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase__ : Any = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ : Optional[int] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
UpperCAmelCase__ : int = spec.loader.load_module()
UpperCAmelCase__ : Optional[int] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCAmelCase__ : List[Any] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
UpperCAmelCase__ : Union[str, Any] = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def A ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCAmelCase__ = False
# source code of `config_class`
lowerCAmelCase__ = inspect.getsource(UpperCamelCase_ )
lowerCAmelCase__ = _re_checkpoint.findall(UpperCamelCase_ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCAmelCase__ ,lowerCAmelCase__ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase__ = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase__ = True
break
lowerCAmelCase__ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase__ = "\n".join(sorted(UpperCamelCase_ ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
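# Illustrative note (added): the `_re_checkpoint` regex above matches markdown
# links such as "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
# and captures ("bert-base-uncased", "https://huggingface.co/bert-base-uncased"),
# which is exactly the (ckpt_name, ckpt_link) pair unpacked in the loop.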
| 48 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ : int = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'facebook/nllb-200-distilled-600M'
snake_case__ :Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ :List[Any] = 'translator'
snake_case__ :List[Any] = AutoTokenizer
snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM
snake_case__ :List[str] = LANGUAGE_CODES
snake_case__ :List[Any] = ['text', 'text', 'text']
snake_case__ :List[Any] = ['text']
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
lowerCAmelCase__ = self.lang_to_code[src_lang]
lowerCAmelCase__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
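# Hedged usage sketch (added; assumes the standard PipelineTool __call__, which
# chains encode -> forward -> decode, and the original class name TranslationTool):
#   translator = TranslationTool()
#   translator("Bonjour", src_lang="French", tgt_lang="English")  # -> "Hello"
# Both language names must be keys of LANGUAGE_CODES, otherwise encode raises a
# ValueError as shown above.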
| 48 | 1 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class A :
def __init__( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = 256
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = cva.imread(__magic_name__ , 0 )
lowerCAmelCase__ = copy.deepcopy(self.img )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
lowerCAmelCase__ = np.sum(__magic_name__ )
for i in range(len(__magic_name__ ) ):
lowerCAmelCase__ = x[i] / self.k
self.sk += prk
lowerCAmelCase__ = (self.L - 1) * self.sk
lowerCAmelCase__ = last % 1 # fractional part of `last`; the original `int(last % last)` was dead code
lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__magic_name__ )
lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase__ = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
UpperCAmelCase__ : Any = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
UpperCAmelCase__ : str = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
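# Hedged note (added): the loop in `stretch` implements histogram equalization,
# mapping each gray level k to s_k = (L - 1) * sum_{j <= k} n_j / N (rounded to
# the nearest level), where n_j are the histogram counts, N the pixel total and
# L = 256 the number of intensity levels.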
| 48 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'timm_backbone'
def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = backbone
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = features_only
lowerCAmelCase__ = use_pretrained_backbone
lowerCAmelCase__ = True
lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
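# Hedged usage sketch (added; "resnet50" is an example timm identifier, and the
# class above corresponds to TimmBackboneConfig in the original source):
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
# Leaving `out_indices` unset defaults to (-1,), i.e. only the last feature map.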
| 48 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : str = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : str = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
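# Hedged note (added): with the _LazyModule pattern above, a statement such as
#   from transformers.models.roformer import RoFormerModel
# defers the heavy torch import until the attribute is first accessed, while the
# TYPE_CHECKING branch keeps static type checkers and IDEs fully informed.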
| 48 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'Salesforce/blip-image-captioning-base'
snake_case__ :List[Any] = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
snake_case__ :List[Any] = 'image_captioner'
snake_case__ :Optional[int] = AutoModelForVisionaSeq
snake_case__ :Optional[int] = ['image']
snake_case__ :Any = ['text']
def __init__( self : str , *__magic_name__ : List[str] , **__magic_name__ : Tuple ):
"""simple docstring"""
requires_backends(self , ["vision"] )
super().__init__(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : "Image" ):
"""simple docstring"""
return self.pre_processor(images=__magic_name__ , return_tensors="pt" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Optional[int] ):
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0].strip()
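# Hedged usage sketch (added; assumes the standard PipelineTool __call__ and the
# original class name ImageCaptioningTool; "photo.jpg" is a hypothetical file):
#   from PIL import Image
#   captioner = ImageCaptioningTool()
#   caption = captioner(Image.open("photo.jpg"))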
| 48 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ : int = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : str = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 48 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = "▁"
UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ : Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase__ : Optional[Any] = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = VOCAB_FILES_NAMES
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Tuple = ['input_ids', 'attention_mask']
snake_case__ :List[int] = []
snake_case__ :List[int] = []
def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
}
lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
lowerCAmelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : List[Any] , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = ""
lowerCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__magic_name__ ) + token
lowerCAmelCase__ = True
lowerCAmelCase__ = []
else:
current_sub_tokens.append(__magic_name__ )
lowerCAmelCase__ = False
out_string += self.sp_model.decode(__magic_name__ )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
lowerCAmelCase__ = [1] * len(self.prefix_tokens )
lowerCAmelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
lowerCAmelCase__ = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[src_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
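# Illustrative note (added): with src_lang="en_XX" the two setters above yield
#   prefix_tokens = [en_XX id], suffix_tokens = [eos id]
# so an encoded sequence becomes [en_XX] x_1 ... x_n [</s>], matching the
# mBART-50 "language id first" format used for both source and target text.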
| 48 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :jnp.ndarray
@flax_register_to_config
class A ( nn.Module , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case__ :int = 32
snake_case__ :int = 4
snake_case__ :int = 4
snake_case__ :Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
snake_case__ :Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
snake_case__ :Union[bool, Tuple[bool]] = False
snake_case__ :Tuple[int] = (320, 640, 1280, 1280)
snake_case__ :int = 2
snake_case__ :Union[int, Tuple[int]] = 8
snake_case__ :Optional[Union[int, Tuple[int]]] = None
snake_case__ :int = 1280
snake_case__ :float = 0.0
snake_case__ :bool = False
snake_case__ :jnp.dtype = jnp.floataa
snake_case__ :bool = True
snake_case__ :int = 0
snake_case__ :bool = False
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : jax.random.KeyArray ):
"""simple docstring"""
lowerCAmelCase__ = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase__ = jnp.zeros(__magic_name__ , dtype=jnp.floataa )
lowerCAmelCase__ = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase__ ,lowerCAmelCase__ = jax.random.split(__magic_name__ )
lowerCAmelCase__ = {"params": params_rng, "dropout": dropout_rng}
return self.init(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )["params"]
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.block_out_channels
lowerCAmelCase__ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase__ = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase__ = FlaxTimestepEmbedding(__magic_name__ , dtype=self.dtype )
lowerCAmelCase__ = self.only_cross_attention
if isinstance(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase__ = []
lowerCAmelCase__ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = block_out_channels[i]
lowerCAmelCase__ = i == len(__magic_name__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase__ = FlaxCrossAttnDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase__ = FlaxDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__magic_name__ )
lowerCAmelCase__ = down_blocks
# mid
lowerCAmelCase__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCAmelCase__ = []
lowerCAmelCase__ = list(reversed(__magic_name__ ) )
lowerCAmelCase__ = list(reversed(__magic_name__ ) )
lowerCAmelCase__ = list(reversed(__magic_name__ ) )
lowerCAmelCase__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = reversed_block_out_channels[i]
lowerCAmelCase__ = reversed_block_out_channels[min(i + 1 , len(__magic_name__ ) - 1 )]
lowerCAmelCase__ = i == len(__magic_name__ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCAmelCase__ = FlaxCrossAttnUpBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , prev_output_channel=__magic_name__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase__ = FlaxUpBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , prev_output_channel=__magic_name__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__magic_name__ )
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = up_blocks
# out
lowerCAmelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowerCAmelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=None , __magic_name__ : bool = True , __magic_name__ : bool = False , ):
"""simple docstring"""
if not isinstance(__magic_name__ , jnp.ndarray ):
lowerCAmelCase__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__magic_name__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase__ = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase__ = jnp.expand_dims(__magic_name__ , 0 )
lowerCAmelCase__ = self.time_proj(__magic_name__ )
lowerCAmelCase__ = self.time_embedding(__magic_name__ )
# 2. pre-process
lowerCAmelCase__ = jnp.transpose(__magic_name__ , (0, 2, 3, 1) )
lowerCAmelCase__ = self.conv_in(__magic_name__ )
# 3. down
lowerCAmelCase__ = (sample,)
for down_block in self.down_blocks:
if isinstance(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ ,lowerCAmelCase__ = down_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
else:
lowerCAmelCase__ ,lowerCAmelCase__ = down_block(__magic_name__ , __magic_name__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCAmelCase__ = ()
for down_block_res_sample, down_block_additional_residual in zip(
__magic_name__ , __magic_name__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase__ = new_down_block_res_samples
# 4. mid
lowerCAmelCase__ = self.mid_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCAmelCase__ = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCAmelCase__ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ = up_block(
__magic_name__ , temb=__magic_name__ , encoder_hidden_states=__magic_name__ , res_hidden_states_tuple=__magic_name__ , deterministic=not train , )
else:
lowerCAmelCase__ = up_block(__magic_name__ , temb=__magic_name__ , res_hidden_states_tuple=__magic_name__ , deterministic=not train )
# 6. post-process
lowerCAmelCase__ = self.conv_norm_out(__magic_name__ )
lowerCAmelCase__ = nn.silu(__magic_name__ )
lowerCAmelCase__ = self.conv_out(__magic_name__ )
lowerCAmelCase__ = jnp.transpose(__magic_name__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__magic_name__ )
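# Hedged note (added): the two jnp.transpose calls in __call__ convert between
# PyTorch-style NCHW inputs/outputs and the NHWC layout that flax.linen.Conv
# expects, keeping the public interface drop-in compatible with the torch UNet.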
| 48 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
if start < end:
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 )
count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ )
return count
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = start - 1
for index in range(UpperCamelCase_ , UpperCamelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ = new_pivot_index + 1
lowerCAmelCase__ = a[new_pivot_index]
lowerCAmelCase__ = a[index]
lowerCAmelCase__ = temp
lowerCAmelCase__ = a[new_pivot_index + 1]
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = temp
return new_pivot_index + 1, count
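# Hedged note (added): randomized quicksort performs about 2 * n * ln(n)
# comparisons in expectation (~921 for the n = 100 array generated below),
# which is the quantity accumulated in `count` and printed at the end.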
UpperCAmelCase__ : Tuple = TemporaryFile()
UpperCAmelCase__ : List[str] = 1_00 # 100 elements are to be sorted
UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation
UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase__ : Optional[Any] = np.load(outfile)
UpperCAmelCase__ : Any = len(M) - 1
UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal "
    "distribution is:"
)
print(z)
| 48 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : str = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
UpperCAmelCase__ : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
for attribute in key.split("." ):
lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
lowerCAmelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def A ( UpperCamelCase_ : Dict , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowerCAmelCase__ = None
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == "group" , )
lowerCAmelCase__ = True
elif name.split("." )[0] == "proj":
lowerCAmelCase__ = fairseq_model.proj
lowerCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(UpperCamelCase_ )[0].split("." )[-2]
lowerCAmelCase__ = mapped_key.replace("*" , UpperCamelCase_ )
if "weight_g" in name:
lowerCAmelCase__ = "weight_g"
elif "weight_v" in name:
lowerCAmelCase__ = "weight_v"
elif "bias" in name:
lowerCAmelCase__ = "bias"
elif "weight" in name:
lowerCAmelCase__ = "weight"
else:
lowerCAmelCase__ = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def A ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = full_name.split("conv_layers." )[-1]
lowerCAmelCase__ = name.split("." )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase_ )
def A ( UpperCamelCase_ : Dict ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ = emb.weight.shape
lowerCAmelCase__ = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
lowerCAmelCase__ = emb.weight.data
return lin_layer
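# Illustrative note (not part of the original script): the helper above implements the usual
# weight-tying trick. For an embedding with weight of shape (vocab_size, emb_dim), it builds a
# bias-free nn.Linear and copies emb.weight into it, so the layer computes
# hidden_states @ emb.weight.T and yields vocab-sized logits, e.g. (sketch, upstream name):
# emb = nn.Embedding(1000, 64); lin = make_linear_from_emb(emb)
# lin(torch.zeros(1, 64)).shape  ->  torch.Size([1, 1000])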
def A ( UpperCamelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = [line.split(" " )[0] for line in lines]
lowerCAmelCase__ = len(UpperCamelCase_ )
lowerCAmelCase__ = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(UpperCamelCase_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
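# Worked example (hypothetical dict file contents, not taken from the repo): for a fairseq
# dict.txt whose lines are "| 94802" and "E 51860", the helper above reads the first token of
# each line and yields {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5}.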
@torch.no_grad()
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
lowerCAmelCase__ = WavaVecaConfig.from_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = SpeechaTextaConfig.from_pretrained(
UpperCamelCase_ , vocab_size=UpperCamelCase_ , decoder_layers=UpperCamelCase_ , do_stable_layer_norm=UpperCamelCase_ )
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
lowerCAmelCase__ = model[0].eval()
# set weights for wav2vec2 encoder
lowerCAmelCase__ = WavaVecaModel(UpperCamelCase_ )
lowerCAmelCase__ = recursively_load_weights_wavaveca(model.encoder , UpperCamelCase_ )
lowerCAmelCase__ = SpeechaTextaForCausalLM(UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase_ )
# set output linear layer
unexpected_keys.remove("embed_out" )
lowerCAmelCase__ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
lowerCAmelCase__ = SpeechEncoderDecoderModel(encoder=UpperCamelCase_ , decoder=UpperCamelCase_ )
lowerCAmelCase__ = False
# add projection layer
lowerCAmelCase__ = nn.Parameter(projection_layer.weight )
lowerCAmelCase__ = nn.Parameter(projection_layer.bias )
lowerCAmelCase__ = create_vocab_dict(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , "vocab.json" ) , "w" ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = SpeechaTextaTokenizer(os.path.join(UpperCamelCase_ , "vocab.json" ) )
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = hf_wavavec.config.to_dict()
lowerCAmelCase__ = tokenizer.pad_token_id
lowerCAmelCase__ = tokenizer.bos_token_id
lowerCAmelCase__ = tokenizer.eos_token_id
lowerCAmelCase__ = "speech_to_text_2"
lowerCAmelCase__ = "wav2vec2"
lowerCAmelCase__ = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase_ )
hf_wavavec.save_pretrained(UpperCamelCase_ )
feature_extractor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_02_24, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
UpperCAmelCase__ : str = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
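# Example invocation (paths and the script name are placeholders, not taken from the repo):
# python convert_wav2vec2_seq2seq_checkpoint.py \
#     --checkpoint_path /path/to/fairseq/checkpoint.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --pytorch_dump_folder_path ./speech-encoder-decoder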
| 48 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = rename_key(UpperCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ = val.squeeze_()
else:
lowerCAmelCase__ = val
return orig_state_dict
def A ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = GroupViTConfig()
lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)
# verify result
lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase__ = model(**UpperCamelCase_ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
print("Successfully saved processor and model to" , UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
model.push_to_hub(UpperCamelCase_ , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 48 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class A :
snake_case__ :Dict = None
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = os.path.join(__magic_name__ , "feat_extract.json" )
feat_extract_first.to_json_file(__magic_name__ )
lowerCAmelCase__ = self.feature_extraction_class.from_json_file(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = feat_extract_first.save_pretrained(__magic_name__ )[0]
check_json_file_has_correct_format(__magic_name__ )
lowerCAmelCase__ = self.feature_extraction_class.from_pretrained(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = self.feature_extraction_class()
self.assertIsNotNone(__magic_name__ )
| 48 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCAmelCase__ : Optional[Any] = 1_00
UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCAmelCase__ : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def A ( UpperCamelCase_ : int ) -> set[int]:
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowerCAmelCase__ = set()
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
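# Doctest-style sketch of the prime-partition products above (using the upstream name
# `partition`; the mangled identifiers in this file prevent calling it directly):
# >>> sorted(partition(5))   # prime partitions of 5 are {5} and {2, 3}
# [5, 6]
# >>> partition(0), partition(1)
# ({1}, set())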
def A ( UpperCamelCase_ : int = 50_00 ) -> int | None:
'''simple docstring'''
for number_to_partition in range(1 , UpperCamelCase_ ):
if len(partition(UpperCamelCase_ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 48 | 1 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
class A :
def __init__( self : Optional[Any] , __magic_name__ : str = None , __magic_name__ : uuid.UUID = None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=None ):
"""simple docstring"""
if not conversation_id:
lowerCAmelCase__ = uuid.uuida()
if past_user_inputs is None:
lowerCAmelCase__ = []
if generated_responses is None:
lowerCAmelCase__ = []
lowerCAmelCase__ = conversation_id
lowerCAmelCase__ = past_user_inputs
lowerCAmelCase__ = generated_responses
lowerCAmelCase__ = text
def __eq__( self : Any , __magic_name__ : Tuple ):
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : str , __magic_name__ : bool = False ):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
lowerCAmelCase__ = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
lowerCAmelCase__ = text
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase__ = None
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : str ):
"""simple docstring"""
self.generated_responses.append(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
lowerCAmelCase__ = "user" if is_user else "bot"
output += f"""{name} >> {text} \n"""
return output
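# Minimal usage sketch, assuming this class corresponds to transformers' `Conversation`
# (comments only -- the mangled parameter names above make it non-instantiable as written):
# conversation = Conversation("Going to the movies tonight, any suggestions?")
# conversation.add_user_input("Is it an action movie?")  # warns: unprocessed input exists
# conversation.mark_processed()                          # moves the text to past_user_inputs
# conversation.append_response("The Big Lebowski")       # records the bot reply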
@add_end_docstrings(
SCREAMING_SNAKE_CASE__ , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , *__magic_name__ : str , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(*__magic_name__ , **__magic_name__ )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase__ = self.tokenizer.eos_token
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=None , **__magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
if min_length_for_response is not None:
lowerCAmelCase__ = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase__ = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase__ = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase__ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__magic_name__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , __magic_name__ : Union[Conversation, List[Conversation]] , __magic_name__ : Tuple=0 , **__magic_name__ : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = super().__call__(__magic_name__ , num_workers=__magic_name__ , **__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) == 1:
return outputs[0]
return outputs
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Conversation , __magic_name__ : str=32 ):
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
lowerCAmelCase__ = self.tokenizer._build_conversation_input_ids(__magic_name__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase__ = self._legacy_parse_and_tokenize(__magic_name__ )
if self.framework == "pt":
lowerCAmelCase__ = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase__ = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : int , __magic_name__ : Union[str, Any]=10 , **__magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length )
lowerCAmelCase__ = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
lowerCAmelCase__ = max_length - minimum_tokens
lowerCAmelCase__ = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase__ = model_inputs["attention_mask"][:, -trim:]
lowerCAmelCase__ = model_inputs.pop("conversation" )
lowerCAmelCase__ = max_length
lowerCAmelCase__ = self.model.generate(**__magic_name__ , **__magic_name__ )
if self.model.config.is_encoder_decoder:
lowerCAmelCase__ = 1
else:
lowerCAmelCase__ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict=True ):
"""simple docstring"""
lowerCAmelCase__ = model_outputs["output_ids"]
lowerCAmelCase__ = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , )
lowerCAmelCase__ = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(__magic_name__ )
return conversation
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Conversation ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer.eos_token_id
lowerCAmelCase__ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) )
if len(__magic_name__ ) > self.tokenizer.model_max_length:
lowerCAmelCase__ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 48 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.json"}
UpperCAmelCase__ : Optional[Any] = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
UpperCAmelCase__ : Union[str, Any] = {"mgp-str": 27}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = VOCAB_FILES_NAMES
snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int="[GO]" , __magic_name__ : Optional[Any]="[GO]" , __magic_name__ : List[str]="[s]" , __magic_name__ : str="[GO]" , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(
unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding="utf-8" ) as vocab_handle:
lowerCAmelCase__ = json.load(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.vocab )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = []
for s in text:
            char_tokens.extend(s )
return char_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Tuple ):
"""simple docstring"""
return self.decoder.get(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error("Vocabulary path ({}) should be a directory".format(__magic_name__ ) )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + "\n" )
return (vocab_file,)
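# Usage sketch, assuming this corresponds to transformers' `MgpstrTokenizer` (comments only):
# tokenization is character-level, so _tokenize("abc") yields ["a", "b", "c"], and each
# character is looked up in `vocab` with the unknown token (here "[GO]") as the fallback id.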
| 48 | 1 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class A ( SCREAMING_SNAKE_CASE__ ):
def __get__( self : List[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any]=None ):
"""simple docstring"""
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
lowerCAmelCase__ = "__cached_" + self.fget.__name__
lowerCAmelCase__ = getattr(__magic_name__ , __magic_name__ , __magic_name__ )
if cached is None:
lowerCAmelCase__ = self.fget(__magic_name__ )
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
return cached
def A ( UpperCamelCase_ : List[str] ) -> str:
'''simple docstring'''
lowerCAmelCase__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""" )
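# Doctest-style sketch of the truthy/falsy mapping above (upstream name `strtobool`):
# >>> strtobool("YES"), strtobool("off")
# (1, 0)
# >>> strtobool("maybe")   # any other string raises ValueError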
def A ( UpperCamelCase_ : List[Any] ) -> Any:
'''simple docstring'''
if is_torch_fx_proxy(UpperCamelCase_ ):
return True
if is_torch_available():
import torch
if isinstance(UpperCamelCase_ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCamelCase_ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCamelCase_ , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCamelCase_ , np.ndarray )
def A ( UpperCamelCase_ : Any ) -> Any:
'''simple docstring'''
return isinstance(UpperCamelCase_ , np.ndarray )
def A ( UpperCamelCase_ : str ) -> Union[str, Any]:
'''simple docstring'''
return _is_numpy(UpperCamelCase_ )
def A ( UpperCamelCase_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
import torch
return isinstance(UpperCamelCase_ , torch.Tensor )
def A ( UpperCamelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(UpperCamelCase_ )
def A ( UpperCamelCase_ : Union[str, Any] ) -> str:
'''simple docstring'''
import torch
return isinstance(UpperCamelCase_ , torch.device )
def A ( UpperCamelCase_ : Any ) -> str:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(UpperCamelCase_ )
def A ( UpperCamelCase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
if hasattr(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
else:
return False
return isinstance(UpperCamelCase_ , torch.dtype )
def A ( UpperCamelCase_ : int ) -> Any:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(UpperCamelCase_ )
def A ( UpperCamelCase_ : str ) -> Tuple:
'''simple docstring'''
import tensorflow as tf
return isinstance(UpperCamelCase_ , tf.Tensor )
def A ( UpperCamelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(UpperCamelCase_ )
def A ( UpperCamelCase_ : Optional[int] ) -> str:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCamelCase_ , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(UpperCamelCase_ )
return type(UpperCamelCase_ ) == tf.Tensor
def A ( UpperCamelCase_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCamelCase_ )
def A ( UpperCamelCase_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCamelCase_ , jnp.ndarray )
def A ( UpperCamelCase_ : Union[str, Any] ) -> str:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(UpperCamelCase_ )
def A ( UpperCamelCase_ : List[str] ) -> Tuple:
'''simple docstring'''
if isinstance(UpperCamelCase_ , (dict, UserDict) ):
return {k: to_py_obj(UpperCamelCase_ ) for k, v in obj.items()}
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return [to_py_obj(UpperCamelCase_ ) for o in obj]
elif is_tf_tensor(UpperCamelCase_ ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCamelCase_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCamelCase_ ):
return np.asarray(UpperCamelCase_ ).tolist()
elif isinstance(UpperCamelCase_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def A ( UpperCamelCase_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
if isinstance(UpperCamelCase_ , (dict, UserDict) ):
return {k: to_numpy(UpperCamelCase_ ) for k, v in obj.items()}
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return np.array(UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
return obj.numpy()
elif is_torch_tensor(UpperCamelCase_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCamelCase_ ):
return np.asarray(UpperCamelCase_ )
else:
return obj
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = fields(self )
# Safety and consistency checks
if not len(__magic_name__ ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
lowerCAmelCase__ = getattr(self , class_fields[0].name )
lowerCAmelCase__ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__magic_name__ ):
if isinstance(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ = first_field.items()
lowerCAmelCase__ = True
else:
try:
lowerCAmelCase__ = iter(__magic_name__ )
lowerCAmelCase__ = True
except TypeError:
lowerCAmelCase__ = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__magic_name__ ):
if (
not isinstance(__magic_name__ , (list, tuple) )
or not len(__magic_name__ ) == 2
or not isinstance(element[0] , __magic_name__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCAmelCase__ = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowerCAmelCase__ = element[1]
elif first_field is not None:
lowerCAmelCase__ = first_field
else:
for field in class_fields:
lowerCAmelCase__ = getattr(self , field.name )
if v is not None:
lowerCAmelCase__ = v
def __delitem__( self : Union[str, Any] , *__magic_name__ : Tuple , **__magic_name__ : Dict ):
"""simple docstring"""
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def __SCREAMING_SNAKE_CASE ( self : Tuple , *__magic_name__ : Tuple , **__magic_name__ : Dict ):
"""simple docstring"""
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def __SCREAMING_SNAKE_CASE ( self : int , *__magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , *__magic_name__ : List[Any] , **__magic_name__ : List[Any] ):
"""simple docstring"""
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : Union[str, Any] , __magic_name__ : Optional[int] ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__magic_name__ , __magic_name__ )
super().__setattr__(__magic_name__ , __magic_name__ )
def __setitem__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[Any] ):
"""simple docstring"""
super().__setitem__(__magic_name__ , __magic_name__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
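# Behavior sketch, assuming this is transformers' `ModelOutput` base class: subclasses act as
# both dataclasses and ordered mappings, so for out = SomeOutput(loss=None, logits=t) one has
# out["logits"] is out.logits is out[0]; None-valued fields are dropped from the mapping,
# which is why integer indexing and to_tuple() skip them.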
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : int , __magic_name__ : Optional[int] ):
"""simple docstring"""
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :int = 'longest'
snake_case__ :Optional[Any] = 'max_length'
snake_case__ :str = 'do_not_pad'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'pt'
snake_case__ :Union[str, Any] = 'tf'
snake_case__ :Any = 'np'
snake_case__ :List[str] = 'jax'
class A :
def __init__( self : Dict , __magic_name__ : List[ContextManager] ):
"""simple docstring"""
lowerCAmelCase__ = context_managers
lowerCAmelCase__ = ExitStack()
def __enter__( self : str ):
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(__magic_name__ )
def __exit__( self : Dict , *__magic_name__ : Dict , **__magic_name__ : int ):
"""simple docstring"""
self.stack.__exit__(*__magic_name__ , **__magic_name__ )
def A ( UpperCamelCase_ : Dict ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = infer_framework(UpperCamelCase_ )
if framework == "tf":
lowerCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def A ( UpperCamelCase_ : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = model_class.__name__
lowerCAmelCase__ = infer_framework(UpperCamelCase_ )
if framework == "tf":
lowerCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def A ( UpperCamelCase_ : MutableMapping , UpperCamelCase_ : str = "" , UpperCamelCase_ : str = "." ) -> List[Any]:
'''simple docstring'''
def _flatten_dict(UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]="" , UpperCamelCase_ : Any="." ):
for k, v in d.items():
lowerCAmelCase__ = str(UpperCamelCase_ ) + delimiter + str(UpperCamelCase_ ) if parent_key else k
if v and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
yield from flatten_dict(UpperCamelCase_ , UpperCamelCase_ , delimiter=UpperCamelCase_ ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
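# Worked example for the nested-dict flattener above (upstream name `flatten_dict`):
# >>> flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
# {'a': 1, 'b.c': 2, 'b.d.e': 3}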
@contextmanager
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : bool = False ) -> Any:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any]=None ) -> Tuple:
'''simple docstring'''
if is_numpy_array(UpperCamelCase_ ):
return np.transpose(UpperCamelCase_ , axes=UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.T if axes is None else array.permute(*UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.transpose(UpperCamelCase_ , perm=UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return jnp.transpose(UpperCamelCase_ , axes=UpperCamelCase_ )
else:
raise ValueError(F"""Type not supported for transpose: {type(UpperCamelCase_ )}.""" )
def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> Dict:
'''simple docstring'''
if is_numpy_array(UpperCamelCase_ ):
return np.reshape(UpperCamelCase_ , UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.reshape(*UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return jnp.reshape(UpperCamelCase_ , UpperCamelCase_ )
else:
raise ValueError(F"""Type not supported for reshape: {type(UpperCamelCase_ )}.""" )
def A ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=None ) -> Optional[int]:
'''simple docstring'''
if is_numpy_array(UpperCamelCase_ ):
return np.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return jnp.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ )
else:
raise ValueError(F"""Type not supported for squeeze: {type(UpperCamelCase_ )}.""" )
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if is_numpy_array(UpperCamelCase_ ):
return np.expand_dims(UpperCamelCase_ , UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.unsqueeze(dim=UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.expand_dims(UpperCamelCase_ , axis=UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return jnp.expand_dims(UpperCamelCase_ , axis=UpperCamelCase_ )
else:
raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase_ )}.""" )
def A ( UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
if is_numpy_array(UpperCamelCase_ ):
return np.size(UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.numel()
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.size(UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return array.size
else:
raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase_ )}.""" )
def A ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(UpperCamelCase_ , (tuple, list) ):
lowerCAmelCase__ = [F"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCAmelCase__ = F"""{repo_id}--{value}"""
return auto_map
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
for base_class in inspect.getmro(UpperCamelCase_ ):
lowerCAmelCase__ = base_class.__module__
lowerCAmelCase__ = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"""Could not infer framework from class {model_class}.""" )
| 48 |
'''simple docstring'''
from math import sqrt
def A ( UpperCamelCase_ : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = 0
for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(UpperCamelCase_ ):
total += i + n // i
elif i == sqrt(UpperCamelCase_ ):
total += i
return total - n
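# Doctest-style sketch (the function computes the aliquot sum, i.e. the sum of proper divisors):
# >>> sum_of_divisors(12)   # 1 + 2 + 3 + 4 + 6
# 16
# >>> sum_of_divisors(6)    # 6 is perfect: the aliquot sum equals the number itself
# 6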
def A ( UpperCamelCase_ : int = 1_00_00 ) -> int:
'''simple docstring'''
lowerCAmelCase__ = sum(
i
for i in range(1 , UpperCamelCase_ )
if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 48 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> str | Literal[False]:
'''simple docstring'''
lowerCAmelCase__ = list(UpperCamelCase_ )
lowerCAmelCase__ = list(UpperCamelCase_ )
lowerCAmelCase__ = 0
for i in range(len(UpperCamelCase_ ) ):
if lista[i] != lista[i]:
count += 1
lowerCAmelCase__ = "_"
if count > 1:
return False
else:
return "".join(UpperCamelCase_ )
def A ( UpperCamelCase_ : list[str] ) -> list[str]:
'''simple docstring'''
lowerCAmelCase__ = []
while True:
lowerCAmelCase__ = ["$"] * len(UpperCamelCase_ )
lowerCAmelCase__ = []
for i in range(len(UpperCamelCase_ ) ):
for j in range(i + 1 , len(UpperCamelCase_ ) ):
lowerCAmelCase__ = compare_string(binary[i] , binary[j] )
if k is False:
lowerCAmelCase__ = "*"
lowerCAmelCase__ = "*"
temp.append("X" )
for i in range(len(UpperCamelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(UpperCamelCase_ ) == 0:
return pi
lowerCAmelCase__ = list(set(UpperCamelCase_ ) )
def A ( UpperCamelCase_ : int , UpperCamelCase_ : Sequence[int] ) -> list[str]:
'''simple docstring'''
lowerCAmelCase__ = []
for minterm in minterms:
lowerCAmelCase__ = ""
for _ in range(UpperCamelCase_ ):
lowerCAmelCase__ = str(minterm % 2 ) + string
minterm //= 2
temp.append(UpperCamelCase_ )
return temp
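# Worked example (upstream name `decimal_to_binary`): each minterm becomes a fixed-width
# binary string over `no_of_variable` bits.
# >>> decimal_to_binary(3, [1, 5])
# ['001', '101']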
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : int ) -> bool:
'''simple docstring'''
lowerCAmelCase__ = list(UpperCamelCase_ )
lowerCAmelCase__ = list(UpperCamelCase_ )
lowerCAmelCase__ = 0
for i in range(len(UpperCamelCase_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def A ( UpperCamelCase_ : list[list[int]] , UpperCamelCase_ : list[str] ) -> list[str]:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = [0] * len(UpperCamelCase_ )
for i in range(len(chart[0] ) ):
lowerCAmelCase__ = 0
lowerCAmelCase__ = -1
for j in range(len(UpperCamelCase_ ) ):
if chart[j][i] == 1:
count += 1
lowerCAmelCase__ = j
if count == 1:
lowerCAmelCase__ = 1
for i in range(len(UpperCamelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(UpperCamelCase_ ) ):
lowerCAmelCase__ = 0
temp.append(prime_implicants[i] )
while True:
lowerCAmelCase__ = 0
lowerCAmelCase__ = -1
lowerCAmelCase__ = 0
for i in range(len(UpperCamelCase_ ) ):
lowerCAmelCase__ = chart[i].count(1 )
if count_n > max_n:
lowerCAmelCase__ = count_n
lowerCAmelCase__ = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(UpperCamelCase_ ) ):
lowerCAmelCase__ = 0
def A ( UpperCamelCase_ : list[str] , UpperCamelCase_ : list[str] ) -> list[list[int]]:
'''simple docstring'''
lowerCAmelCase__ = [[0 for x in range(len(UpperCamelCase_ ) )] for x in range(len(UpperCamelCase_ ) )]
for i in range(len(UpperCamelCase_ ) ):
lowerCAmelCase__ = prime_implicants[i].count("_" )
for j in range(len(UpperCamelCase_ ) ):
if is_for_table(prime_implicants[i] , binary[j] , UpperCamelCase_ ):
lowerCAmelCase__ = 1
return chart
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = int(input("Enter the no. of variables\n" ) )
    lowerCAmelCase__ = [
        int(x )
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
    ]
lowerCAmelCase__ = decimal_to_binary(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = check(UpperCamelCase_ )
print("Prime Implicants are:" )
print(UpperCamelCase_ )
lowerCAmelCase__ = prime_implicant_chart(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = selection(UpperCamelCase_ , UpperCamelCase_ )
print("Essential Prime Implicants are:" )
print(UpperCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 48 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.mean(1 )
# Centralize the data of class i
lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCamelCase_ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = features.mean(1 )
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.shape[1]
lowerCAmelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
lowerCAmelCase__ = features.mean(1 )
# Center the dataset
lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) )
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1]
lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ )
# Take all the columns in the reverse order (-1), and then takes only the first
lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
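# Usage sketch (upstream name `principal_component_analysis`): for a (features x samples)
# matrix, the projection keeps the top-k eigenvectors of the covariance matrix, so
# >>> features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# >>> principal_component_analysis(features, 2).shape   # k = 2 components, 3 samples
# (2, 3)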
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
lowerCAmelCase__ ,lowerCAmelCase__ = eigh(
covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , )
lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions]
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ )
lowerCAmelCase__ = svd_matrix[:, 0:dimensions]
lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = linear_discriminant_analysis(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ )
if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :List[str] = ['pixel_values']
def __init__( self : Union[str, Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Optional[Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = size_divisor
lowerCAmelCase__ = resample
super().__init__(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = get_image_size(__magic_name__ )
# Rounds the height and width down to the closest multiple of size_divisor
lowerCAmelCase__ = height // size_divisor * size_divisor
lowerCAmelCase__ = width // size_divisor * size_divisor
lowerCAmelCase__ = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
return image
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[int] ):
"""simple docstring"""
return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Optional[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = size_divisor if size_divisor is not None else self.size_divisor
lowerCAmelCase__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
lowerCAmelCase__ = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(__magic_name__ ) for img in images]
if do_resize:
lowerCAmelCase__ = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(__magic_name__ , scale=1 / 255 ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCAmelCase__ = {"pixel_values": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
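# Usage sketch, assuming this corresponds to a GLPN-style image processor in transformers
# (comments only): with size_divisor=32, preprocessing rounds each image's height and width
# down to the nearest multiple of 32, rescales pixel values by 1/255, and returns a
# BatchFeature, e.g. processor(images=pil_image, return_tensors="pt")["pixel_values"].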
| 48 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    """
    Split `word` into lines of at most `max_width` characters and fully
    justify every line except the last.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to the result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 48 | 1 |
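# A self-contained demo of the round-robin space distribution performed by
# `justify` above; the helper name and inputs here are illustrative.
def distribute_spaces(words: list, max_width: int) -> str:
    gaps = len(words) - 1
    spare = max_width - sum(len(w) for w in words)
    base, extra = divmod(spare, gaps)
    pieces = []
    for i, w in enumerate(words[:-1]):
        # the leftmost `extra` gaps receive one additional space
        pieces.append(w + " " * (base + (1 if i < extra else 0)))
    pieces.append(words[-1])
    return "".join(pieces)


assert distribute_spaces(["This", "is", "an"], 16) == "This    is    an"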
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_pos

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prims_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prim's Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prims_algorithm(adjacency_list))
| 48 |
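# An independent reference implementation of Prim's algorithm built on the
# standard-library heap, useful for cross-checking the handwritten Heap class
# above on small graphs. The adjacency format mirrors the snippet's
# [neighbor, weight] lists; the function name is illustrative.
import heapq


def prim_mst_reference(adj, start=0):
    visited = {start}
    frontier = [(weight, start, nbr) for nbr, weight in adj[start]]
    heapq.heapify(frontier)
    tree = []
    while frontier and len(visited) < len(adj):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue
        visited.add(v)
        tree.append((u, v))
        for nbr, w in adj[v]:
            if nbr not in visited:
                heapq.heappush(frontier, (w, v, nbr))
    return tree


assert prim_mst_reference({0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}) == [(0, 1), (1, 2)]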
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ : str = sys.version_info >= (3, 10)
def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class A :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'titi'
snake_case__ :Optional[int] = 'toto'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'titi'
snake_case__ :str = 'toto'
snake_case__ :int = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class A :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
lowerCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_json_file(Path(temp_local_path + ".json" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 48 | 1 |
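# A minimal end-to-end sketch of the pattern the tests above exercise: declare
# a dataclass, build an HfArgumentParser from it, and parse an argv list back
# into a typed instance. The dataclass and its field values are illustrative.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class SketchArguments:
    learning_rate: float = field(default=5e-5, metadata={"help": "peak learning rate"})
    epochs: int = 3


sketch_parser = HfArgumentParser(SketchArguments)
(sketch_args,) = sketch_parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--epochs", "5"])
assert sketch_args.learning_rate == 1e-4 and sketch_args.epochs == 5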
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 48 |
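# Typical invocation of the conversion script above; all paths are
# illustrative placeholders.
# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin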
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("utf-8"))
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"""The following runners are offline:\n{failed}""")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 48 |
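# Example invocation of the runner-status check above; the runner names and
# the token are placeholders.
# python check_runner_status.py \
#     --target_runners docker-gpu,docker-gpu-multi \
#     --token <GITHUB_TOKEN_WITH_ACTIONS_READ>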
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "<pad>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium'
snake_case__ :Tuple = 'C\'est trop cool'
snake_case__ :List[str] = 'Esto es genial'
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCAmelCase__ = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 48 | 1 |
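# A short sketch of the behaviour the multilingual tests above assert: setting
# `tgt_lang` changes the language-code token the tokenizer prepends to every
# encoded sequence. Kept as comments because it downloads a real checkpoint.
# from transformers import Speech2TextTokenizer
# tok = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
# tok.tgt_lang = "fr"
# ids = tok("C'est trop cool").input_ids  # ids[0] is the French language code
# assert ids[-1] == tok.eos_token_id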
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # keys containing "encoder.embeddings" are excluded from the sum
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 48 |
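# A tiny sketch of the parameter-sum sanity check used in the conversion
# above: summing every tensor in two state dicts and comparing within a
# tolerance is a cheap way to detect dropped or mangled weights.
import torch

sd_before = {"w": torch.ones(2, 2), "b": torch.zeros(2)}
sd_after = {"w": torch.ones(2, 2), "b": torch.zeros(2)}
total_before = sum(p.float().sum() for p in sd_before.values())
total_after = sum(p.float().sum() for p in sd_after.values())
assert torch.allclose(total_before, total_after, atol=1e-3)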
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : int = "RegNetConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase__ : Tuple = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat"
UpperCAmelCase__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
lowerCAmelCase__ = ACTaFN[activation] if activation is not None else tf.identity
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) )
lowerCAmelCase__ = self.normalization(__magic_name__ )
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config.num_channels
lowerCAmelCase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = shape_list(__magic_name__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) )
lowerCAmelCase__ = self.embedder(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ )
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
lowerCAmelCase__ = [
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.pooler(__magic_name__ )
for layer_module in self.attention:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = hidden_state * pooled
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__magic_name__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase__ = [
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ),
*[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ):
"""simple docstring"""
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ):
"""simple docstring"""
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(__magic_name__ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )
@keras_serializable
class A ( tf.keras.layers.Layer ):
snake_case__ :List[Any] = RegNetConfig
def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config
lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" )
lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
@unpack_inputs
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(__magic_name__ )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = RegNetConfig
snake_case__ :Optional[Any] = 'regnet'
snake_case__ :Tuple = 'pixel_values'
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
# classification head
lowerCAmelCase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier[0](__magic_name__ )
lowerCAmelCase__ = self.classifier[1](__magic_name__ )
lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
| 48 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path , rembert_config_file , pytorch_dump_path ) -> None:
"""Convert a TensorFlow RemBERT checkpoint into a PyTorch model file."""
# Initialise PyTorch model
config = RemBertConfig.from_json_file(rembert_config_file )
print("Building PyTorch model from configuration: {}".format(str(config ) ) )
model = RemBertModel(config )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path ) )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ : Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 48 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module ) -> None:
"""Freeze all parameters of ``module`` so they are excluded from backpropagation."""
for param in module.parameters():
param.requires_grad = False
def get_device() -> str:
"""Pick the best available torch device, preferring CUDA, with a warning about MPS."""
device = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
device = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def show_image(image ) -> None:
"""Display ``image`` with matplotlib, hiding both axes."""
fig = plt.imshow(image )
fig.axes.get_xaxis().set_visible(False )
fig.axes.get_yaxis().set_visible(False )
plt.show()
def get_timestamp() -> str:
"""Return the current time formatted as HH:MM:SS."""
current_time = datetime.now()
timestamp = current_time.strftime("%H:%M:%S" )
return timestamp
| 48 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : int = "RegNetConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase__ : Tuple = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat"
UpperCAmelCase__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase__ = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
lowerCAmelCase__ = tf.keras.layers.Conv2D(
filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
lowerCAmelCase__ = ACT2FN[activation] if activation is not None else tf.identity
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) )
lowerCAmelCase__ = self.normalization(__magic_name__ )
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config.num_channels
lowerCAmelCase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = shape_list(__magic_name__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) )
lowerCAmelCase__ = self.embedder(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.Conv2D(
filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ )
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePooling2D(keepdims=__magic_name__ , name="pooler" )
lowerCAmelCase__ = [
tf.keras.layers.Conv2D(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.Conv2D(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.pooler(__magic_name__ )
for layer_module in self.attention:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = hidden_state * pooled
return hidden_state
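# Added note (hedged, not in the original file): this is a standard squeeze-and-excitation
# block. The global average pool squeezes a (batch, H, W, C) feature map down to
# (batch, 1, 1, C), the two 1x1 convs bottleneck and then re-expand the channel dimension,
# and the sigmoid output gates each channel of the original map via the multiply above.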
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ),
]
lowerCAmelCase__ = ACT2FN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__magic_name__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ),
]
lowerCAmelCase__ = ACT2FN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase__ = [
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ),
*[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ):
"""simple docstring"""
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ):
"""simple docstring"""
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(__magic_name__ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )
@keras_serializable
class A ( tf.keras.layers.Layer ):
snake_case__ :List[Any] = RegNetConfig
def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config
lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" )
lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePooling2D(keepdims=__magic_name__ , name="pooler" )
@unpack_inputs
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(__magic_name__ )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = RegNetConfig
snake_case__ :Optional[Any] = 'regnet'
snake_case__ :Tuple = 'pixel_values'
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
# classification head
lowerCAmelCase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier[0](__magic_name__ )
lowerCAmelCase__ = self.classifier[1](__magic_name__ )
lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
| 48 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ : List[Any] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 1 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float , kelvin: float , volume: float ) -> float:
"""Return the pressure of an ideal gas from PV = nRT."""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float , kelvin: float , pressure: float ) -> float:
"""Return the volume of an ideal gas from PV = nRT."""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
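# Hedged usage sketch (added for illustration; not part of the original module).
# For 2 mol of an ideal gas at 300 K in a 0.05 m^3 vessel, PV = nRT gives
#   P = 2 * 300 * 8.314462 / 0.05 ~= 99_773.5 Pa,
# and feeding that pressure back in recovers the volume:
# >>> p = pressure_of_gas_system(2, 300, 0.05)
# >>> round(volume_of_gas_system(2, 300, p), 6)
# 0.05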
| 48 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ) -> None:
"""Convert a TensorFlow BigBird checkpoint into a PyTorch model directory."""
config = BigBirdConfig.from_json_file(big_bird_config_file )
print(f"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
model = BigBirdForQuestionAnswering(config )
else:
model = BigBirdForPreTraining(config )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 48 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ : List[str] = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[PIL.Image.Image, np.ndarray]
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : List[Any] , __magic_name__ : PriorTransformer , __magic_name__ : CLIPVisionModel , __magic_name__ : CLIPImageProcessor , __magic_name__ : HeunDiscreteScheduler , __magic_name__ : ShapERenderer , ):
"""simple docstring"""
super().__init__()
self.register_modules(
prior=__magic_name__ , image_encoder=__magic_name__ , image_processor=__magic_name__ , scheduler=__magic_name__ , renderer=__magic_name__ , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] ):
"""simple docstring"""
if latents is None:
lowerCAmelCase__ = randn_tensor(__magic_name__ , generator=__magic_name__ , device=__magic_name__ , dtype=__magic_name__ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCAmelCase__ = latents.to(__magic_name__ )
lowerCAmelCase__ = latents * scheduler.init_noise_sigma
return latents
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Union[str, Any]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCAmelCase__ = torch.device(f"""cuda:{gpu_id}""" )
lowerCAmelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__magic_name__ , __magic_name__ )
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__magic_name__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : Any , ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ) and isinstance(image[0] , torch.Tensor ):
lowerCAmelCase__ = torch.cat(__magic_name__ , axis=0 ) if image[0].ndim == 4 else torch.stack(__magic_name__ , axis=0 )
if not isinstance(__magic_name__ , torch.Tensor ):
lowerCAmelCase__ = self.image_processor(__magic_name__ , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCAmelCase__ = image.to(dtype=self.image_encoder.dtype , device=__magic_name__ )
lowerCAmelCase__ = self.image_encoder(__magic_name__ )["last_hidden_state"]
lowerCAmelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCAmelCase__ = image_embeds.repeat_interleave(__magic_name__ , dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase__ = torch.zeros_like(__magic_name__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase__ = torch.cat([negative_image_embeds, image_embeds] )
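# Added note (hedged, not in the original): downstream in __call__ the two halves are
# split apart again and recombined as  uncond + guidance_scale * (cond - uncond),
# the standard classifier-free guidance update, so concatenating here trades one extra
# batch dimension for a single forward pass instead of two.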
return image_embeds
@torch.no_grad()
@replace_example_docstring(__magic_name__ )
def __call__( self : str , __magic_name__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , __magic_name__ : int = 1 , __magic_name__ : int = 25 , __magic_name__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : float = 4.0 , __magic_name__ : int = 64 , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , ):
"""simple docstring"""
if isinstance(__magic_name__ , PIL.Image.Image ):
lowerCAmelCase__ = 1
elif isinstance(__magic_name__ , torch.Tensor ):
lowerCAmelCase__ = image.shape[0]
elif isinstance(__magic_name__ , __magic_name__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCAmelCase__ = len(__magic_name__ )
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__magic_name__ )}""" )
lowerCAmelCase__ = self._execution_device
lowerCAmelCase__ = batch_size * num_images_per_prompt
lowerCAmelCase__ = guidance_scale > 1.0
lowerCAmelCase__ = self._encode_image(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# prior
self.scheduler.set_timesteps(__magic_name__ , device=__magic_name__ )
lowerCAmelCase__ = self.scheduler.timesteps
lowerCAmelCase__ = self.prior.config.num_embeddings
lowerCAmelCase__ = self.prior.config.embedding_dim
lowerCAmelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __magic_name__ , __magic_name__ , __magic_name__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCAmelCase__ = latents.reshape(latents.shape[0] , __magic_name__ , __magic_name__ )
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase__ = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = self.prior(
__magic_name__ , timestep=__magic_name__ , proj_embedding=__magic_name__ , ).predicted_image_embedding
# remove the variance
noise_pred, _ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred = noise_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCAmelCase__ = self.scheduler.step(
__magic_name__ , timestep=__magic_name__ , sample=__magic_name__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__magic_name__ )
lowerCAmelCase__ = []
for i, latent in enumerate(__magic_name__ ):
print()
lowerCAmelCase__ = self.renderer.decode(
latent[None, :] , __magic_name__ , size=__magic_name__ , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(__magic_name__ )
lowerCAmelCase__ = torch.stack(__magic_name__ )
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
lowerCAmelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCAmelCase__ = [self.numpy_to_pil(__magic_name__ ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__magic_name__ )
| 48 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ :Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :str = False
snake_case__ :Any = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ :int = False
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 48 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'timm_backbone'
def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
self.backbone = backbone
self.num_channels = num_channels
self.features_only = features_only
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = True
self.out_indices = out_indices if out_indices is not None else (-1,)
| 48 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'umt5'
snake_case__ :Any = ['past_key_values']
def __init__( self , vocab_size=250112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
act_info = self.feed_forward_proj.split("-" )
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == "gated"
if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
self.dense_act_fn = "gelu_new"
@property
def hidden_size( self ):
"""simple docstring"""
return self.d_model
@property
def num_attention_heads( self ):
"""simple docstring"""
return self.num_heads
@property
def num_hidden_layers( self ):
"""simple docstring"""
return self.num_layers
class A ( SCREAMING_SNAKE_CASE__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def inputs( self ):
"""simple docstring"""
common_inputs = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
common_inputs["decoder_input_ids"] = {0: "batch"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def default_onnx_opset( self ):
"""simple docstring"""
return 13
@property
def atol_for_validation( self ):
"""simple docstring"""
return 5E-4
| 48 | 1 |
'''simple docstring'''
def count_divisors(n: int ) -> int:
"""Count the divisors of ``n`` via its prime factorisation."""
n_divisors = 1
i = 2
while i * i <= n:
multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def solution() -> int:
"""Return the first triangular number with more than five hundred divisors."""
i = 1
t_num = 1
while True:
i += 1
t_num += i
if count_divisors(t_num ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
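# Hedged worked example (added for illustration). count_divisors relies on the identity
# d(n) = prod(e_i + 1) over the prime factorisation n = prod(p_i ** e_i):
#   28 = 2^2 * 7  ->  d(28) = (2 + 1) * (1 + 1) = 6   (divisors 1, 2, 4, 7, 14, 28)
# 28 is also the first triangular number with more than five divisors, the worked
# example in Project Euler problem 12 before it asks for more than five hundred.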
| 48 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
"""Undirected, unweighted graph used to run the Markov chain algorithm."""
def __init__( self ):
self.connections = {}
def add_node( self , node: str ) -> None:
self.connections[node] = {}
def add_transition_probability( self , node1: str , node2: str , probability: float ) -> None:
if node1 not in self.connections:
self.add_node(node1 )
if node2 not in self.connections:
self.add_node(node2 )
self.connections[node1][node2] = probability
def get_nodes( self ) -> list[str]:
return list(self.connections )
def transition( self , node: str ) -> str:
current_probability = 0
random_value = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def get_transitions(start: str , transitions: list[tuple[str, str, float]] , steps: int ) -> dict[str, int]:
"""Run the Markov chain for ``steps`` transitions and count visits per node."""
graph = MarkovChainGraphUndirectedUnweighted()
for node1, node2, probability in transitions:
graph.add_transition_probability(node1 , node2 , probability )
visited = Counter(graph.get_nodes() )
node = start
for _ in range(steps ):
node = graph.transition(node )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
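# Hedged usage sketch (added for illustration; the transition list below is made up).
# Probabilities out of each node should sum to 1 so transition() always moves:
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   visited = get_transitions("a", transitions, 5000)
# Over many steps visited["a"] / visited["b"] approaches the stationary ratio 5 : 1,
# since the stationary distribution solves 0.1 * pi_a = 0.5 * pi_b.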
| 48 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Optional[Any] = VQModel
snake_case__ :str = 'sample'
@property
def dummy_input( self , sizes=(32, 32) ):
"""simple docstring"""
batch_size = 4
num_channels = 3
image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
return {"sample": image}
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return (3, 32, 32)
@property
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return (3, 32, 32)
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=True )
self.assertIsNotNone(model )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(torch_device )
image = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
model = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(torch_device ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
image = image.to(torch_device )
with torch.no_grad():
output = model(image ).sample
output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1E-3 ) )
| 48 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase__ : Optional[Any] = pytest.mark.integration
UpperCAmelCase__ : str = {"comet"}
UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ : Optional[int] = {"code_eval"}
UpperCAmelCase__ : List[Any] = os.name == "nt"
UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
| 48 | 1 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
UpperCAmelCase__ : Any = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
UpperCAmelCase__ : int = {
"abeja/gpt-neox-japanese-2.7b": 20_48,
}
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ) -> List[Any]:
'''simple docstring'''
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
lowerCAmelCase__ = json.loads(f.read() )
lowerCAmelCase__ = collections.OrderedDict()
lowerCAmelCase__ = collections.OrderedDict()
lowerCAmelCase__ = collections.OrderedDict()
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(UpperCamelCase_ ):
lowerCAmelCase__ = b
lowerCAmelCase__ = idx
for wd in b:
lowerCAmelCase__ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :List[str] = VOCAB_FILES_NAMES
snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :int = ['input_ids', 'attention_mask']
def __init__( self : Dict , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Dict="<|endoftext|>" , __magic_name__ : List[str]="<|endoftext|>" , __magic_name__ : Tuple="<|startoftext|>" , __magic_name__ : Optional[int]="<|endoftext|>" , __magic_name__ : List[Any]=False , **__magic_name__ : Dict , ):
"""simple docstring"""
super().__init__(
unk_token=__magic_name__ , pad_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , do_clean_text=__magic_name__ , **__magic_name__ , )
if not os.path.isfile(__magic_name__ ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(__magic_name__ ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
lowerCAmelCase__ = do_clean_text
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = load_vocab_and_emoji(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return len(self.raw_vocab )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : int ):
"""simple docstring"""
return self.subword_tokenizer.tokenize(__magic_name__ , clean=self.do_clean_text )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Any ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] ):
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = "".join(__magic_name__ ).strip()
return out_string
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : "Conversation" ):
"""simple docstring"""
lowerCAmelCase__ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__magic_name__ , add_special_tokens=__magic_name__ ) + [self.eos_token_id] )
if len(__magic_name__ ) > self.model_max_length:
lowerCAmelCase__ = input_ids[-self.model_max_length :]
return input_ids
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
lowerCAmelCase__ = 0
if os.path.isdir(__magic_name__ ):
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
lowerCAmelCase__ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
lowerCAmelCase__ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(__magic_name__ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
lowerCAmelCase__ = token_index
writer.write(",".join(__magic_name__ ) + "\n" )
index += 1
with open(__magic_name__ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , __magic_name__ )
return vocab_file, emoji_file
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : List[str] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = vocab # same as swe
lowerCAmelCase__ = ids_to_tokens # same as bpe
lowerCAmelCase__ = emoji
lowerCAmelCase__ = np.max([len(__magic_name__ ) for w in self.vocab.keys()] )
lowerCAmelCase__ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
lowerCAmelCase__ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
lowerCAmelCase__ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
lowerCAmelCase__ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
lowerCAmelCase__ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
lowerCAmelCase__ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
lowerCAmelCase__ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
lowerCAmelCase__ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
lowerCAmelCase__ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : Dict ):
"""simple docstring"""
return len(self.ids_to_tokens )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.content_repattera.sub("<URL>" , __magic_name__ )
lowerCAmelCase__ = self.content_repattera.sub("<EMAIL>" , __magic_name__ )
lowerCAmelCase__ = self.content_repattera.sub("<TEL>" , __magic_name__ )
lowerCAmelCase__ = self.content_repattera.sub("<DATE>" , __magic_name__ )
lowerCAmelCase__ = self.content_repattera.sub("<DATE>" , __magic_name__ )
lowerCAmelCase__ = self.content_repattera.sub("<PRICE>" , __magic_name__ )
lowerCAmelCase__ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCAmelCase__ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Dict , __magic_name__ : Optional[int]=False ):
"""simple docstring"""
lowerCAmelCase__ = text.replace(" " , "<SP>" )
lowerCAmelCase__ = text.replace(" " , "<SP>" )
lowerCAmelCase__ = text.replace("\r\n" , "<BR>" )
lowerCAmelCase__ = text.replace("\n" , "<BR>" )
lowerCAmelCase__ = text.replace("\r" , "<BR>" )
lowerCAmelCase__ = text.replace("\t" , "<TAB>" )
lowerCAmelCase__ = text.replace("—" , "ー" )
lowerCAmelCase__ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
lowerCAmelCase__ = text.replace(__magic_name__ , __magic_name__ )
if clean:
lowerCAmelCase__ = self.clean_text(__magic_name__ )
def check_simbol(__magic_name__ : int ):
lowerCAmelCase__ = x.encode()
if len(__magic_name__ ) == 1 and len(__magic_name__ ) == 2:
lowerCAmelCase__ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(__magic_name__ : int ):
lowerCAmelCase__ = x.encode()
if len(__magic_name__ ) == 1 and len(__magic_name__ ) == 3:
lowerCAmelCase__ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_8080 and c <= 0XE2_B07F:
return True
return False
lowerCAmelCase__ = 0
lowerCAmelCase__ = []
while pos < len(__magic_name__ ):
lowerCAmelCase__ = min(len(__magic_name__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
lowerCAmelCase__ = [] # (token_id, token, pos)
for e in range(__magic_name__ , __magic_name__ , -1 ):
lowerCAmelCase__ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__magic_name__ ) > 2:
lowerCAmelCase__ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__magic_name__ ) > 0:
# the smallest token_id is adopted
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = sorted(__magic_name__ , key=lambda __magic_name__ : x[0] )[0]
result.append(__magic_name__ )
lowerCAmelCase__ = e
else:
lowerCAmelCase__ = pos + 1
lowerCAmelCase__ = text[pos:end]
if check_simbol(__magic_name__ ):
result.append("<KIGOU>" )
elif checkuae(__magic_name__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
lowerCAmelCase__ = end
return result
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str , __magic_name__ : str="\n" ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__magic_name__ ) > 0:
words.append(bytearray(__magic_name__ ).decode("utf-8" , errors="replace" ) )
lowerCAmelCase__ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(__magic_name__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(__magic_name__ )
if len(__magic_name__ ) > 0:
words.append(bytearray(__magic_name__ ).decode("utf-8" , errors="replace" ) )
lowerCAmelCase__ = "".join(__magic_name__ )
return text
| 48 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ : int = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'facebook/nllb-200-distilled-600M'
snake_case__ :Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ :List[Any] = 'translator'
snake_case__ :List[Any] = AutoTokenizer
snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM
snake_case__ :List[str] = LANGUAGE_CODES
snake_case__ :List[Any] = ['text', 'text', 'text']
snake_case__ :List[Any] = ['text']
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
lowerCAmelCase__ = self.lang_to_code[src_lang]
lowerCAmelCase__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
| 48 | 1 |
'''simple docstring'''
import random
def A ( UpperCamelCase_ : int , UpperCamelCase_ : float , UpperCamelCase_ : bool = False ) -> dict:
'''simple docstring'''
lowerCAmelCase__ = {i: [] for i in range(UpperCamelCase_ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(UpperCamelCase_ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(UpperCamelCase_ ):
for j in range(i + 1 , UpperCamelCase_ ):
if random.random() < probability:
graph[i].append(UpperCamelCase_ )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(UpperCamelCase_ )
return graph
def A ( UpperCamelCase_ : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(UpperCamelCase_ ) if i != j] for i in range(UpperCamelCase_ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'timm_backbone'
def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = backbone
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = features_only
lowerCAmelCase__ = use_pretrained_backbone
lowerCAmelCase__ = True
lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
| 48 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase__ : int = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
UpperCAmelCase__ : List[Any] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
UpperCAmelCase__ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A :
snake_case__ :Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
snake_case__ :Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A folder containing the training data.'} )
snake_case__ :Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A folder containing the validation data.'} )
snake_case__ :Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
snake_case__ :int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
snake_case__ :float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
snake_case__ :Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
snake_case__ :Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = {}
if self.train_dir is not None:
lowerCAmelCase__ = self.train_dir
if self.validation_dir is not None:
lowerCAmelCase__ = self.validation_dir
lowerCAmelCase__ = data_files if data_files else None
@dataclass
class A :
snake_case__ :str = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(SCREAMING_SNAKE_CASE__ )} , )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
snake_case__ :str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
snake_case__ :str = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'} )
snake_case__ :bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
snake_case__ :Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
snake_case__ :Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
snake_case__ :Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Stride to use for the encoder.'} , )
class A :
def __init__( self : Any , __magic_name__ : List[Any]=192 , __magic_name__ : int=32 , __magic_name__ : Dict=4 , __magic_name__ : List[Any]=0.6 ):
"""simple docstring"""
lowerCAmelCase__ = input_size
lowerCAmelCase__ = mask_patch_size
lowerCAmelCase__ = model_patch_size
lowerCAmelCase__ = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size" )
lowerCAmelCase__ = self.input_size // self.mask_patch_size
lowerCAmelCase__ = self.mask_patch_size // self.model_patch_size
lowerCAmelCase__ = self.rand_size**2
lowerCAmelCase__ = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = np.random.permutation(self.token_count )[: self.mask_count]
lowerCAmelCase__ = np.zeros(self.token_count , dtype=__magic_name__ )
lowerCAmelCase__ = 1
lowerCAmelCase__ = mask.reshape((self.rand_size, self.rand_size) )
lowerCAmelCase__ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def A ( UpperCamelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = torch.stack([example["pixel_values"] for example in examples] )
lowerCAmelCase__ = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" , UpperCamelCase_ , UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase_ )
transformers.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCAmelCase__ = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCamelCase_ ) and data_args.train_val_split > 0.0:
lowerCAmelCase__ = ds["train"].train_test_split(data_args.train_val_split )
lowerCAmelCase__ = split["train"]
lowerCAmelCase__ = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCamelCase_ )
elif model_args.model_name_or_path:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase_ )
else:
lowerCAmelCase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(UpperCamelCase_ , "decoder_type" ):
lowerCAmelCase__ = "simmim"
# adapt config
lowerCAmelCase__ = model_args.image_size if model_args.image_size is not None else config.image_size
lowerCAmelCase__ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowerCAmelCase__ = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCamelCase_ )
elif model_args.model_name_or_path:
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCamelCase_ )
else:
lowerCAmelCase__ = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowerCAmelCase__ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowerCAmelCase__ = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
lowerCAmelCase__ = AutoModelForMaskedImageModeling.from_config(UpperCamelCase_ )
if training_args.do_train:
lowerCAmelCase__ = ds["train"].column_names
else:
lowerCAmelCase__ = ds["validation"].column_names
if data_args.image_column_name is not None:
lowerCAmelCase__ = data_args.image_column_name
elif "image" in column_names:
lowerCAmelCase__ = "image"
elif "img" in column_names:
lowerCAmelCase__ = "img"
else:
lowerCAmelCase__ = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowerCAmelCase__ = Compose(
[
Lambda(lambda UpperCamelCase_ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowerCAmelCase__ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(UpperCamelCase_ : List[Any] ):
lowerCAmelCase__ = [transforms(UpperCamelCase_ ) for image in examples[image_column_name]]
lowerCAmelCase__ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowerCAmelCase__ = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCamelCase_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowerCAmelCase__ = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCamelCase_ )
# Initialize our trainer
lowerCAmelCase__ = Trainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=UpperCamelCase_ , data_collator=UpperCamelCase_ , )
# Training
if training_args.do_train:
lowerCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=UpperCamelCase_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCAmelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , UpperCamelCase_ )
trainer.save_metrics("eval" , UpperCamelCase_ )
# Write model card and (optionally) push to hub
lowerCAmelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase_ )
else:
trainer.create_model_card(**UpperCamelCase_ )
if __name__ == "__main__":
main()
| 48 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'Salesforce/blip-image-captioning-base'
snake_case__ :List[Any] = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
snake_case__ :List[Any] = 'image_captioner'
snake_case__ :Optional[int] = AutoModelForVisionaSeq
snake_case__ :Optional[int] = ['image']
snake_case__ :Any = ['text']
def __init__( self : str , *__magic_name__ : List[str] , **__magic_name__ : Tuple ):
"""simple docstring"""
requires_backends(self , ["vision"] )
super().__init__(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : "Image" ):
"""simple docstring"""
return self.pre_processor(images=__magic_name__ , return_tensors="pt" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Optional[int] ):
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0].strip()
| 48 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
UpperCAmelCase__ : str = False
class A ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
image=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
lowerCAmelCase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 48 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = "▁"
UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ : Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase__ : Optional[Any] = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = VOCAB_FILES_NAMES
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Tuple = ['input_ids', 'attention_mask']
snake_case__ :List[int] = []
snake_case__ :List[int] = []
def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
}
lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
lowerCAmelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : List[Any] , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = ""
lowerCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__magic_name__ ) + token
lowerCAmelCase__ = True
lowerCAmelCase__ = []
else:
current_sub_tokens.append(__magic_name__ )
lowerCAmelCase__ = False
out_string += self.sp_model.decode(__magic_name__ )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
lowerCAmelCase__ = [1] * len(self.prefix_tokens )
lowerCAmelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
lowerCAmelCase__ = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[src_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
| 48 | 1 |
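# A minimal, self-contained sketch of the prefix/suffix special-token logic the
# tokenizer record above implements (MBart-50-style: a [lang_code] prefix and an
# [eos] suffix). All ids below are illustrative, not the model's real vocabulary.
def build_inputs(token_ids, prefix_tokens, suffix_tokens):
    # Wrap ordinary ids with the language-code prefix and eos suffix.
    return prefix_tokens + token_ids + suffix_tokens

def special_tokens_mask(token_ids, prefix_tokens, suffix_tokens):
    # 1 marks a special token (language code / eos), 0 marks a normal token.
    return [1] * len(prefix_tokens) + [0] * len(token_ids) + [1] * len(suffix_tokens)

ids = [1042, 87, 15]      # ordinary sentencepiece ids (made up)
prefix = [250004]         # source-language code, as set_src_lang_special_tokens assigns
suffix = [2]              # eos_token_id
assert build_inputs(ids, prefix, suffix) == [250004, 1042, 87, 15, 2]
assert special_tokens_mask(ids, prefix, suffix) == [1, 0, 0, 0, 1]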
'''simple docstring'''
def A ( ) -> int:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = 1
while len(UpperCamelCase_ ) < 1E6:
constant.append(str(UpperCamelCase_ ) )
i += 1
lowerCAmelCase__ = "".join(UpperCamelCase_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
if __name__ == "__main__":
print(solution())
| 48 |
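# A runnable restatement of the Project Euler 40 solution above: concatenate
# 1, 2, 3, ... into Champernowne's constant and multiply the digits at
# positions 1, 10, 100, ..., 1_000_000. Names here are illustrative.
def champernowne_digit_product() -> int:
    digits = []
    n = 1
    while len(digits) < 1_000_000:  # collect at least one million digits
        digits.extend(str(n))
        n += 1
    s = "".join(digits)
    product = 1
    for position in (1, 10, 100, 1_000, 10_000, 100_000, 1_000_000):
        product *= int(s[position - 1])
    return product

assert champernowne_digit_product() == 210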
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
if start < end:
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 )
count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ )
return count
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = start - 1
for index in range(UpperCamelCase_ , UpperCamelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ = new_pivot_index + 1
lowerCAmelCase__ = a[new_pivot_index]
lowerCAmelCase__ = a[index]
lowerCAmelCase__ = temp
lowerCAmelCase__ = a[new_pivot_index + 1]
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = temp
return new_pivot_index + 1, count
UpperCAmelCase__ : Tuple = TemporaryFile()
UpperCAmelCase__ : List[str] = 1_00 # 100 elements are to be sorted
UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation
UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase__ : Optional[Any] = np.load(outfile)
UpperCAmelCase__ : Any = len(M) - 1
UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 48 | 1 |
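# A compact, runnable sketch of the algorithm in the record above: randomized
# in-place quicksort (Lomuto partition) that returns the number of element
# comparisons made. Variable names are illustrative.
from random import randint

def quick_sort_count(a, start, end):
    if start >= end:
        return 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]  # move the pivot to the end
    boundary, count = start - 1, 0
    for i in range(start, end):
        count += 1  # one comparison per scanned element
        if a[i] < a[end]:
            boundary += 1
            a[boundary], a[i] = a[i], a[boundary]
    a[boundary + 1], a[end] = a[end], a[boundary + 1]
    p = boundary + 1
    return count + quick_sort_count(a, start, p - 1) + quick_sort_count(a, p + 1, end)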
'''simple docstring'''
def A ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> int:
'''simple docstring'''
lowerCAmelCase__ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def A ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = 0
while b > 0:
if b & 1:
lowerCAmelCase__ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 48 |
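# The two functions above implement "Russian peasant" multiplication (plain and
# modular) via doubling and bit-shifting. A hedged, self-contained sketch of the
# same idea with a usage check:
def peasant_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:   # add the current doubling when the low bit of b is set
            res += a
        a += a      # double a
        b >>= 1     # halve b
    return res

assert peasant_multiply(13, 11) == 143
assert peasant_multiply(0, 5) == 0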
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = rename_key(UpperCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ = val.squeeze_()
else:
lowerCAmelCase__ = val
return orig_state_dict
def A ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = GroupViTConfig()
lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)
# verify result
lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase__ = model(**UpperCamelCase_ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
print("Successfully saved processor and model to" , UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
model.push_to_hub(UpperCamelCase_ , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 48 | 1 |
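# A minimal sketch of the fused-qkv splitting done in convert_state_dict above:
# a (3*dim, dim) in-projection weight is sliced into separate query/key/value
# matrices. Shapes and names are illustrative, not the checkpoint's real sizes.
import torch

def split_qkv(weight: torch.Tensor, dim: int):
    q = weight[:dim, :]
    k = weight[dim : dim * 2, :]
    v = weight[-dim:, :]
    return q, k, v

fused = torch.randn(3 * 8, 8)
q, k, v = split_qkv(fused, dim=8)
assert q.shape == k.shape == v.shape == (8, 8)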
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = StableUnCLIPPipeline
snake_case__ :int = TEXT_TO_IMAGE_PARAMS
snake_case__ :str = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case__ :Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ :str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
snake_case__ :Tuple = False
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = 32
lowerCAmelCase__ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__magic_name__ , projection_dim=__magic_name__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCAmelCase__ = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__magic_name__ , num_layers=1 , )
torch.manual_seed(0 )
lowerCAmelCase__ = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=__magic_name__ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowerCAmelCase__ = StableUnCLIPImageNormalizer(embedding_dim=__magic_name__ )
lowerCAmelCase__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__magic_name__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__magic_name__ , layers_per_block=1 , upcast_attention=__magic_name__ , use_linear_projection=__magic_name__ , )
torch.manual_seed(0 )
lowerCAmelCase__ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__magic_name__ , steps_offset=1 , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL()
lowerCAmelCase__ = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any]=0 ):
"""simple docstring"""
if str(__magic_name__ ).startswith("mps" ):
lowerCAmelCase__ = torch.manual_seed(__magic_name__ )
else:
lowerCAmelCase__ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCAmelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__magic_name__ )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowerCAmelCase__ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase__ = pipe("anime turle" , generator=__magic_name__ , output_type="np" )
lowerCAmelCase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase__ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowerCAmelCase__ = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase__ = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowerCAmelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 48 |
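# The memory test above follows the standard pattern for bounding peak GPU
# usage: reset CUDA's peak-memory counters, run the workload, then read the
# high-water mark. A hedged standalone sketch (assumes a CUDA device):
import torch

def peak_memory_of(fn) -> int:
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()  # bytes at the high-water mark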
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCAmelCase__ : Optional[Any] = 1_00
UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCAmelCase__ : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def A ( UpperCamelCase_ : int ) -> set[int]:
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowerCAmelCase__ = set()
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def A ( UpperCamelCase_ : int = 50_00 ) -> int | None:
'''simple docstring'''
for number_to_partition in range(1 , UpperCamelCase_ ):
if len(partition(UpperCamelCase_ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 48 | 1 |
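# The memoised partition() above counts prime partitions by mapping each
# multiset of primes to its unique product. A brute-force cross-check for small
# n, illustrative only (the hard-coded prime list covers n up to 47):
def prime_partitions(n: int, smallest: int = 2) -> int:
    small_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
    if n == 0:
        return 1
    return sum(
        prime_partitions(n - p, p)  # force non-decreasing primes to avoid double counting
        for p in small_primes
        if smallest <= p <= n
    )

assert prime_partitions(10) == 5  # 5+5, 3+7, 2+3+5, 2+2+3+3, 2+2+2+2+2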
'''simple docstring'''
def A ( UpperCamelCase_ : int , UpperCamelCase_ : Any ) -> Tuple:
'''simple docstring'''
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def A ( UpperCamelCase_ : Dict , UpperCamelCase_ : str=0 ) -> Optional[int]:
'''simple docstring'''
return sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[column] )
def A ( UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : List[str]=float("inf" ) ) -> Any:
'''simple docstring'''
for i in range(points_counts - 1 ):
for j in range(i + 1 , UpperCamelCase_ ):
lowerCAmelCase__ = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowerCAmelCase__ = current_dis
return min_dis
def A ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple=float("inf" ) ) -> Union[str, Any]:
'''simple docstring'''
for i in range(min(6 , points_counts - 1 ) , UpperCamelCase_ ):
for j in range(max(0 , i - 6 ) , UpperCamelCase_ ):
lowerCAmelCase__ = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowerCAmelCase__ = current_dis
return min_dis
def A ( UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
if points_counts <= 3:
return dis_between_closest_pair(UpperCamelCase_ , UpperCamelCase_ )
# recursion
lowerCAmelCase__ = points_counts // 2
lowerCAmelCase__ = closest_pair_of_points_sqr(
UpperCamelCase_ , points_sorted_on_y[:mid] , UpperCamelCase_ )
lowerCAmelCase__ = closest_pair_of_points_sqr(
UpperCamelCase_ , points_sorted_on_y[mid:] , points_counts - mid )
lowerCAmelCase__ = min(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(UpperCamelCase_ )
lowerCAmelCase__ = dis_between_closest_in_strip(
UpperCamelCase_ , len(UpperCamelCase_ ) , UpperCamelCase_ )
return min(UpperCamelCase_ , UpperCamelCase_ )
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase__ = column_based_sort(UpperCamelCase_ , column=0 )
lowerCAmelCase__ = column_based_sort(UpperCamelCase_ , column=1 )
return (
closest_pair_of_points_sqr(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
) ** 0.5
if __name__ == "__main__":
UpperCAmelCase__ : str = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 48 |
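# The divide-and-conquer above works with squared distances until the final
# square root. A brute-force O(n^2) reference the recursive version can be
# checked against; names are illustrative:
def closest_pair_brute_force(pts) -> float:
    best = float("inf")
    for i in range(len(pts) - 1):
        for j in range(i + 1, len(pts)):
            d = (pts[i][0] - pts[j][0]) ** 2 + (pts[i][1] - pts[j][1]) ** 2
            best = min(best, d)
    return best ** 0.5

pts = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
assert closest_pair_brute_force(pts) == 2 ** 0.5  # the pair (2, 3) and (3, 4)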
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.json"}
UpperCAmelCase__ : Optional[Any] = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
UpperCAmelCase__ : Union[str, Any] = {"mgp-str": 27}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = VOCAB_FILES_NAMES
snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int="[GO]" , __magic_name__ : Optional[Any]="[GO]" , __magic_name__ : List[str]="[s]" , __magic_name__ : str="[GO]" , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(
unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding="utf-8" ) as vocab_handle:
lowerCAmelCase__ = json.load(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.vocab )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = []
for s in text:
char_tokens.extend(__magic_name__ )
return char_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Tuple ):
"""simple docstring"""
return self.decoder.get(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error("Vocabulary path ({}) should be a directory".format(__magic_name__ ) )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + "\n" )
return (vocab_file,)
| 48 | 1 |
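# The MGP-STR tokenizer above is a plain character-level tokenizer: tokenize()
# splits a string into characters and the vocab maps each one to an id, with
# "[GO]" doubling as the unknown token. A tiny self-contained sketch of the
# same behaviour (the vocab here is made up):
class CharTokenizerSketch:
    def __init__(self, vocab: dict):
        self.vocab = vocab
        self.decoder = {v: k for k, v in vocab.items()}

    def tokenize(self, text: str):
        return list(text)  # one token per character

    def encode(self, text: str):
        unk = self.vocab["[GO]"]
        return [self.vocab.get(ch, unk) for ch in self.tokenize(text)]

tok = CharTokenizerSketch({"[GO]": 0, "a": 1, "b": 2})
assert tok.encode("ab?") == [1, 2, 0]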