| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
"""simple docstring"""
import torch
from torch import nn
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Any ,_snake_case : str ,_snake_case : List[Any] ,_snake_case : str ,_snake_case : Optional[Any]=1 ,_snake_case : List[str]=False ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[Any] = n_token
lowercase__ : List[str] = d_embed
lowercase__ : int = d_proj
lowercase__ : Union[str, Any] = cutoffs + [n_token]
lowercase__ : Optional[Any] = [0] + self.cutoffs
lowercase__ : Optional[Any] = div_val
lowercase__ : Dict = self.cutoffs[0]
lowercase__ : str = len(self.cutoffs ) - 1
lowercase__ : Optional[Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowercase__ : List[Any] = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) )
lowercase__ : Any = nn.Parameter(torch.zeros(self.n_clusters ) )
lowercase__ : Dict = nn.ModuleList()
lowercase__ : Optional[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) )
else:
self.out_projs.append(_snake_case )
self.out_layers.append(nn.Linear(_snake_case ,_snake_case ) )
else:
for i in range(len(self.cutoffs ) ):
lowercase__ , lowercase__ : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase__ : List[str] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) )
self.out_layers.append(nn.Linear(_snake_case ,r_idx - l_idx ) )
lowercase__ : Union[str, Any] = keep_order
def UpperCAmelCase ( self : List[Any] ,_snake_case : int ,_snake_case : Union[str, Any] ,_snake_case : int ,_snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
if proj is None:
lowercase__ : List[Any] = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowercase__ : List[str] = nn.functional.linear(_snake_case ,proj.t().contiguous() )
lowercase__ : Tuple = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCAmelCase ( self : Dict ,_snake_case : List[str] ,_snake_case : Dict=None ,_snake_case : Dict=False ) -> Optional[int]:
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
lowercase__ : List[str] = hidden[..., :-1, :].contiguous()
lowercase__ : List[str] = labels[..., 1:].contiguous()
lowercase__ : str = hidden.view(-1 ,hidden.size(-1 ) )
lowercase__ : int = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
lowercase__ : str = hidden.view(-1 ,hidden.size(-1 ) )
if self.n_clusters == 0:
lowercase__ : int = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
if labels is not None:
lowercase__ : Dict = labels != -100
lowercase__ : Union[str, Any] = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device )
lowercase__ : List[str] = (
-nn.functional.log_softmax(_snake_case ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowercase__ : str = nn.functional.log_softmax(_snake_case ,dim=-1 )
else:
# construct weights and biases
lowercase__ , lowercase__ : Dict = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase__ , lowercase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase__ : List[str] = self.out_layers[0].weight[l_idx:r_idx]
lowercase__ : int = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase__ : Union[str, Any] = self.out_layers[i].weight
lowercase__ : List[str] = self.out_layers[i].bias
if i == 0:
lowercase__ : int = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
lowercase__ : Tuple = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(_snake_case )
biases.append(_snake_case )
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowercase__ : Optional[int] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : Tuple = nn.functional.log_softmax(_snake_case ,dim=1 )
if labels is None:
lowercase__ : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowercase__ : List[Any] = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device )
lowercase__ : Any = 0
lowercase__ : Optional[int] = [0] + self.cutoffs
for i in range(len(_snake_case ) - 1 ):
lowercase__ , lowercase__ : Any = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowercase__ : Dict = (labels >= l_idx) & (labels < r_idx)
lowercase__ : Optional[int] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowercase__ : Optional[int] = labels.index_select(0 ,_snake_case ) - l_idx
lowercase__ : Tuple = head_logprob.index_select(0 ,_snake_case )
lowercase__ : List[Any] = hidden.index_select(0 ,_snake_case )
else:
lowercase__ : int = hidden
if i == 0:
if labels is not None:
lowercase__ : str = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 )
else:
lowercase__ : Dict = head_logprob[:, : self.cutoffs[0]]
else:
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = weights[i], biases[i], self.out_projs[i]
lowercase__ : Optional[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = nn.functional.log_softmax(_snake_case ,dim=1 )
lowercase__ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowercase__ : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 ,target_i[:, None] ).squeeze(1 )
else:
lowercase__ : List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowercase__ : Optional[Any] = logprob_i
if labels is not None:
if (hasattr(self ,'''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 ,_snake_case ,-logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[str] ) -> int:
"""simple docstring"""
if self.n_clusters == 0:
lowercase__ : List[Any] = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
return nn.functional.log_softmax(_snake_case ,dim=-1 )
else:
# construct weights and biases
lowercase__ , lowercase__ : Optional[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase__ , lowercase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase__ : List[Any] = self.out_layers[0].weight[l_idx:r_idx]
lowercase__ : List[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase__ : Optional[int] = self.out_layers[i].weight
lowercase__ : int = self.out_layers[i].bias
if i == 0:
lowercase__ : str = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
lowercase__ : Dict = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(_snake_case )
biases.append(_snake_case )
lowercase__ , lowercase__ , lowercase__ : List[str] = weights[0], biases[0], self.out_projs[0]
lowercase__ : Optional[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : Optional[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowercase__ : List[Any] = nn.functional.log_softmax(_snake_case ,dim=1 )
lowercase__ : Optional[Any] = [0] + self.cutoffs
for i in range(len(_snake_case ) - 1 ):
lowercase__ , lowercase__ : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowercase__ : Dict = head_logprob[:, : self.cutoffs[0]]
else:
lowercase__ , lowercase__ , lowercase__ : List[str] = weights[i], biases[i], self.out_projs[i]
lowercase__ : Any = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : Any = nn.functional.log_softmax(_snake_case ,dim=1 )
lowercase__ : Any = head_logprob[:, -i] + tail_logprob_i
lowercase__ : str = logprob_i
return out
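# A minimal sketch of the adaptive-softmax idea the class above implements:
# the vocabulary is split at `cutoffs` into a head shortlist plus tail
# clusters, and the head softmax emits one extra logit per tail cluster.
# All sizes and names below are illustrative assumptions, not taken from
# the dataset row.
import torch
from torch import nn

d_embed, cutoffs = 32, [100, 500]
head = nn.Linear(d_embed, cutoffs[0] + len(cutoffs))  # shortlist + one logit per tail cluster
hidden = torch.randn(4, d_embed)
head_logprob = nn.functional.log_softmax(head(hidden), dim=-1)
# log-probability mass routed to the first tail cluster, per position:
print(head_logprob[:, cutoffs[0]])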
| code_codestyle: 16 |
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('''"''' , '''''' ).split(''',''' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
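# Worked check of the scoring rule, straight from the Project Euler 22
# statement: COLIN has alphabetical value 3 + 15 + 12 + 9 + 14 = 53, and at
# position 938 in the sorted list it scores 938 * 53 = 49714.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49_714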
| style_context_codestyle: 16 | label: 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('T')
lowerCAmelCase_ = TypeVar('U')
class __A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Any ,_snake_case : T | None ,_snake_case : U | None ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = key
lowercase__ : Dict = val
lowercase__ : DoubleLinkedListNode[T, U] | None = None
lowercase__ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : Any ) -> str:
"""simple docstring"""
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class __A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
"""simple docstring"""
lowercase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(_snake_case ,_snake_case )
lowercase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(_snake_case ,_snake_case )
lowercase__ , lowercase__ : Any = self.rear, self.head
def __repr__( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : Any = ['''DoubleLinkedList''']
lowercase__ : Dict = self.head
while node.next is not None:
rep.append(str(_snake_case ) )
lowercase__ : str = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : DoubleLinkedListNode[T, U] ) -> None:
"""simple docstring"""
lowercase__ : Dict = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowercase__ : str = node
lowercase__ : str = previous
lowercase__ : List[Any] = node
lowercase__ : Tuple = self.rear
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
lowercase__ : Dict = node.next
lowercase__ : Any = node.prev
lowercase__ : int = None
lowercase__ : int = None
return node
class __A ( Generic[T, U] ):
'''simple docstring'''
lowerCAmelCase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : Any ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : DoubleLinkedList[T, U] = DoubleLinkedList()
lowercase__ : Optional[Any] = capacity
lowercase__ : Tuple = 0
lowercase__ : List[str] = 0
lowercase__ : Dict = 0
lowercase__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : str ) -> str:
"""simple docstring"""
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : Optional[int] ,_snake_case : T ) -> bool:
"""simple docstring"""
return key in self.cache
def UpperCAmelCase ( self : Any ,_snake_case : T ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
lowercase__ : DoubleLinkedListNode[T, U] = self.cache[key]
lowercase__ : Dict = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(_snake_case )
return node.val
self.miss += 1
return None
def UpperCAmelCase ( self : int ,_snake_case : T ,_snake_case : U ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowercase__ : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(_snake_case ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
lowercase__ : Union[str, Any] = DoubleLinkedListNode(_snake_case ,_snake_case )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowercase__ : List[str] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
lowercase__ : str = value
self.list.add(_snake_case )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] ,_snake_case : int = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(_snake_case : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*_snake_case : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
lowercase__ : Tuple = LRUCache(_snake_case )
lowercase__ : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
lowercase__ : List[str] = func(*_snake_case )
cls.decorator_function_to_instance_map[func].put(args[0] ,_snake_case )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(_snake_case ,'''cache_info''' ,_snake_case ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
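# Minimal usage sketch for the decorator classmethod above, assuming the class
# and method recover their original names (`LRUCache.decorator`); the dump's
# obfuscated identifiers would not run as written.
@LRUCache.decorator(32)
def fib(num: int) -> int:
    return num if num < 2 else fib(num - 1) + fib(num - 2)

print(fib(30))
print(fib.cache_info())  # hit/miss counters exposed via the setattr call above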
| code_codestyle: 16 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = {}
if "candidate_labels" in kwargs:
lowercase__ : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowercase__ : Optional[Any] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = load_image(_snake_case )
lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework )
lowercase__ : str = candidate_labels
lowercase__ : Dict = [hypothesis_template.format(_snake_case ) for x in candidate_labels]
lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case )
lowercase__ : Optional[int] = [text_inputs]
return inputs
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' )
lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] ,_snake_case ):
lowercase__ : List[str] = text_inputs[0]
else:
# Batching case.
lowercase__ : int = text_inputs[0][0]
lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case )
lowercase__ : Union[str, Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Dict = model_outputs.pop('''candidate_labels''' )
lowercase__ : Optional[Any] = model_outputs['''logits'''][0]
if self.framework == "pt":
lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__ : Tuple = probs.tolist()
if not isinstance(_snake_case ,_snake_case ):
lowercase__ : Any = [scores]
elif self.framework == "tf":
lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 )
lowercase__ : Optional[Any] = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
lowercase__ : Union[str, Any] = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda x : -x[0] )
]
return result
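# Hedged usage sketch through the public transformers API; the checkpoint name
# and image URL are assumptions, not taken from the code above.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
outputs = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "a plane"],
)
print(outputs)  # list of {"score": ..., "label": ...}, sorted by descending score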
| style_context_codestyle: 16 | label: 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
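# What the _LazyModule wiring above buys: `import transformers` stays cheap,
# and the torch-backed submodule is only imported when an attribute is first
# touched. A hedged sketch; the checkpoint name is an assumption.
from transformers import PLBartForConditionalGeneration, PLBartTokenizer

tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-base")
model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base")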
| code_codestyle: 16 |
"""simple docstring"""
def _print_dist(dist , v ):
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('''inf''' ):
                print(int(dist[i][j] ) , end='''\t''' )
            else:
                print('''INF''' , end='''\t''' )
        print()
def floyd_warshall(graph , v ):
    dist = [[float('''inf''' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('''inf''' )
                    and dist[k][j] != float('''inf''' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
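# Programmatic version of the interactive transcript above: the same 3-vertex,
# 2-edge graph without the input() prompts (indices used exactly as typed).
INF = float("inf")
graph = [[0.0, INF, INF], [INF, 0.0, INF], [INF, INF, 0.0]]
graph[1][2] = 2.0
graph[2][1] = 1.0
dist, _ = floyd_warshall(graph, 3)
# expected rows, as printed above: 0 INF INF / INF 0 2 / INF 1 0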
| style_context_codestyle: 16 | label: 1 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url ) -> bytes:
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
    with open(file_name, 'wb') as fp:
        fp.write(download_video(url))
    print(F'''Done. Video saved to disk as {file_name}.''')
| code_codestyle: 16 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor ):
    def __init__(self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| style_context_codestyle: 16 | label: 1 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
lowerCAmelCase_ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
lowerCAmelCase_ = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
lowerCAmelCase_ = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,reference_urls=[] ,)
def UpperCAmelCase ( self : Any ,_snake_case : int ,_snake_case : Union[str, Any] ,_snake_case : Tuple=None ,_snake_case : Optional[Any]=False ,_snake_case : Tuple=False ,_snake_case : Optional[int]=False ,) -> List[str]:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
lowercase__ : Dict = np.array([re.sub(_snake_case ,'''''' ,_snake_case ) for x in predictions] )
lowercase__ : Optional[Any] = np.array([re.sub(_snake_case ,'''''' ,_snake_case ) for x in references] )
else:
lowercase__ : Optional[Any] = np.asarray(_snake_case )
lowercase__ : Any = np.asarray(_snake_case )
if ignore_case:
lowercase__ : List[str] = np.char.lower(_snake_case )
lowercase__ : Tuple = np.char.lower(_snake_case )
if ignore_punctuation:
lowercase__ : Union[str, Any] = string.punctuation.maketrans('''''' ,'''''' ,string.punctuation )
lowercase__ : int = np.char.translate(_snake_case ,table=_snake_case )
lowercase__ : Optional[int] = np.char.translate(_snake_case ,table=_snake_case )
if ignore_numbers:
lowercase__ : Optional[Any] = string.digits.maketrans('''''' ,'''''' ,string.digits )
lowercase__ : Tuple = np.char.translate(_snake_case ,table=_snake_case )
lowercase__ : List[str] = np.char.translate(_snake_case ,table=_snake_case )
lowercase__ : Any = predictions == references
return {"exact_match": np.mean(_snake_case ) * 100}
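# The docstring's first example, reproduced without the metric wrapper to show
# what the computation reduces to when no normalisation option is enabled:
import numpy as np

preds = np.array(["cat?", "theater", "yelling", "agent"])
refs = np.array(["the cat", "theater", "YELLING", "agent007"])
print(np.mean(preds == refs) * 100)  # 25.0 -- only "theater" matches exactly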
| code_codestyle: 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| style_context_codestyle: 16 | label: 1 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
lowercase__ : Optional[int] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ : List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert('''RGB''' )
lowercase__ : Any = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ),
] )
lowercase__ : Dict = transform(__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase )
return image
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
if "visual_encoder" in key:
lowercase__ : Any = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , __lowerCamelCase )
if "blocks" in key:
lowercase__ : List[str] = re.sub(r'''blocks''' , '''layers''' , __lowerCamelCase )
if "attn" in key:
lowercase__ : List[str] = re.sub(r'''attn''' , '''self_attn''' , __lowerCamelCase )
if "norm1" in key:
lowercase__ : Dict = re.sub(r'''norm1''' , '''layer_norm1''' , __lowerCamelCase )
if "norm2" in key:
lowercase__ : Optional[Any] = re.sub(r'''norm2''' , '''layer_norm2''' , __lowerCamelCase )
if "encoder.norm" in key:
lowercase__ : Union[str, Any] = re.sub(r'''encoder.norm''' , '''post_layernorm''' , __lowerCamelCase )
if "encoder.patch_embed.proj" in key:
lowercase__ : int = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , __lowerCamelCase )
if "encoder.pos_embed" in key:
lowercase__ : int = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , __lowerCamelCase )
if "encoder.cls_token" in key:
lowercase__ : Dict = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , __lowerCamelCase )
if "self_attn" in key:
lowercase__ : Optional[int] = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , __lowerCamelCase )
return key
@torch.no_grad()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=None ) -> Optional[int]:
if config_path is not None:
lowercase__ : Union[str, Any] = BlipConfig.from_pretrained(__lowerCamelCase )
else:
lowercase__ : str = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
lowercase__ : Dict = BlipForConditionalGeneration(__lowerCamelCase ).eval()
lowercase__ : Any = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ : Optional[int] = blip_decoder(pretrained=__lowerCamelCase , image_size=3_84 , vit='''base''' )
lowercase__ : Optional[int] = pt_model.eval()
lowercase__ : Union[str, Any] = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ : Any = modified_state_dict.pop(__lowerCamelCase )
lowercase__ : Union[str, Any] = rename_key(__lowerCamelCase )
lowercase__ : Union[str, Any] = value
hf_model.load_state_dict(__lowerCamelCase )
lowercase__ : int = 3_84
lowercase__ : Optional[int] = load_demo_image(image_size=__lowerCamelCase , device='''cpu''' )
lowercase__ : str = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ : Dict = tokenizer(['''a picture of'''] ).input_ids
lowercase__ : Tuple = hf_model.generate(__lowerCamelCase , __lowerCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
lowercase__ : Tuple = hf_model.generate(__lowerCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__lowerCamelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ : int = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ : Optional[Any] = blip_vqa(pretrained=__lowerCamelCase , image_size=__lowerCamelCase , vit='''base''' )
vqa_model.eval()
lowercase__ : Optional[Any] = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ : str = modified_state_dict.pop(__lowerCamelCase )
lowercase__ : Optional[Any] = rename_key(__lowerCamelCase )
lowercase__ : List[Any] = value
lowercase__ : Tuple = BlipForQuestionAnswering(__lowerCamelCase )
hf_vqa_model.load_state_dict(__lowerCamelCase )
lowercase__ : Union[str, Any] = ['''How many dogs are in this image?''']
lowercase__ : Tuple = tokenizer(__lowerCamelCase , return_tensors='''pt''' ).input_ids
lowercase__ : Optional[int] = hf_vqa_model.generate(__lowerCamelCase , __lowerCamelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ : Any = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ : str = blip_itm(pretrained=__lowerCamelCase , image_size=__lowerCamelCase , vit='''base''' )
itm_model.eval()
lowercase__ : str = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ : str = modified_state_dict.pop(__lowerCamelCase )
lowercase__ : List[Any] = rename_key(__lowerCamelCase )
lowercase__ : Union[str, Any] = value
lowercase__ : Union[str, Any] = BlipForImageTextRetrieval(__lowerCamelCase )
lowercase__ : Any = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ : Optional[Any] = tokenizer(
__lowerCamelCase , return_tensors='''pt''' , padding='''max_length''' , truncation=__lowerCamelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(__lowerCamelCase )
hf_itm_model.eval()
lowercase__ : Optional[Any] = hf_itm_model(__lowerCamelCase , __lowerCamelCase , use_itm_head=__lowerCamelCase )
lowercase__ : Any = hf_itm_model(__lowerCamelCase , __lowerCamelCase , use_itm_head=__lowerCamelCase )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowerCAmelCase_ = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
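# Hedged spot-check of the regex renaming scheme, assuming the key-renaming
# helper above keeps its original name `rename_key` (the dump reuses one
# obfuscated name for every top-level function, so that name is an assumption):
print(rename_key("visual_encoder.blocks.0.attn.qkv.weight"))
# -> vision_model.encoder.layers.0.self_attn.qkv.weight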
| code_codestyle: 16 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
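# The PT<->TF round-trip the suite above exercises, in two lines;
# "bert-base-uncased" is the same checkpoint name the slow tests hard-code.
from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)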
| style_context_codestyle: 16 | label: 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __A :
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : int ,_snake_case : int ,_snake_case : float = 0 ) -> None:
"""simple docstring"""
lowercase__ , lowercase__ : int = row, column
lowercase__ : Any = [[default_value for c in range(_snake_case )] for r in range(_snake_case )]
def __str__( self : Dict ) -> str:
"""simple docstring"""
lowercase__ : List[str] = f"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
lowercase__ : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
lowercase__ : str = max(_snake_case ,len(str(_snake_case ) ) )
lowercase__ : Tuple = f"""%{max_element_length}s"""
# Make string and return
def single_line(_snake_case : list[float] ) -> str:
nonlocal string_format_identifier
lowercase__ : Dict = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_snake_case ) for row_vector in self.array )
return s
def __repr__( self : Optional[Any] ) -> str:
"""simple docstring"""
return str(self )
def UpperCAmelCase ( self : Tuple ,_snake_case : tuple[int, int] ) -> bool:
"""simple docstring"""
if not (isinstance(_snake_case ,(list, tuple) ) and len(_snake_case ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Optional[int] ,_snake_case : tuple[int, int] ) -> Any:
"""simple docstring"""
assert self.validate_indicies(_snake_case )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Union[str, Any] ,_snake_case : tuple[int, int] ,_snake_case : float ) -> None:
"""simple docstring"""
assert self.validate_indicies(_snake_case )
lowercase__ : Any = value
def __add__( self : int ,_snake_case : Matrix ) -> Matrix:
"""simple docstring"""
assert isinstance(_snake_case ,_snake_case )
assert self.row == another.row and self.column == another.column
# Add
lowercase__ : List[Any] = Matrix(self.row ,self.column )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : Any = self[r, c] + another[r, c]
return result
def __neg__( self : Dict ) -> Matrix:
"""simple docstring"""
lowercase__ : Dict = Matrix(self.row ,self.column )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : List[str] = -self[r, c]
return result
def __sub__( self : int ,_snake_case : Matrix ) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : int ,_snake_case : int | float | Matrix ) -> Matrix:
"""simple docstring"""
if isinstance(_snake_case ,(int, float) ): # Scalar multiplication
lowercase__ : str = Matrix(self.row ,self.column )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : Tuple = self[r, c] * another
return result
elif isinstance(_snake_case ,_snake_case ): # Matrix multiplication
assert self.column == another.row
lowercase__ : Any = Matrix(self.row ,another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowercase__ : Union[str, Any] = f"""Unsupported type given for another ({type(_snake_case )})"""
raise TypeError(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Matrix:
"""simple docstring"""
lowercase__ : int = Matrix(self.column ,self.row )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : Dict = self[r, c]
return result
def UpperCAmelCase ( self : int ,_snake_case : Matrix ,_snake_case : Matrix ) -> Any:
"""simple docstring"""
assert isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowercase__ : Dict = v.transpose()
lowercase__ : Union[str, Any] = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __UpperCAmelCase ( ) -> None:
# a^(-1)
lowercase__ : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowercase__ : List[Any] = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
lowercase__ : Tuple = Matrix(3 , 1 , 0 )
lowercase__ , lowercase__ , lowercase__ : Dict = 1, 2, -3
lowercase__ : List[Any] = Matrix(3 , 1 , 0 )
lowercase__ , lowercase__ , lowercase__ : str = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowerCamelCase , __lowerCamelCase )}""" )
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
testa()
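# The update above implements the Sherman-Morrison identity
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# with `self` playing the role of A^(-1). A quick numpy cross-check on the
# same numbers as the test driver (numpy is an assumption; the class above
# deliberately avoids it):
import numpy as np

a_inv = np.eye(3)
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
lhs = np.linalg.inv(np.linalg.inv(a_inv) + u @ v.T)
rhs = a_inv - (a_inv @ u @ v.T @ a_inv) / (1 + (v.T @ a_inv @ u).item())
assert np.allclose(lhs, rhs)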
| code_codestyle: 16 |
"""simple docstring"""
def solution(length = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
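# Sanity check from the Project Euler 116 statement: a row of length 5 admits
# 7 red (2-unit), 3 green (3-unit) and 2 blue (4-unit) tilings, 12 in total.
assert solution(5) == 12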
| style_context_codestyle: 16 | label: 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class __A ( A_ ,A_ ):
'''simple docstring'''
lowerCAmelCase : Any = "focalnet"
def __init__( self : Any ,_snake_case : Optional[int]=224 ,_snake_case : Tuple=4 ,_snake_case : Optional[Any]=3 ,_snake_case : Optional[Any]=96 ,_snake_case : List[Any]=False ,_snake_case : Optional[int]=[192, 384, 768, 768] ,_snake_case : Optional[Any]=[2, 2, 6, 2] ,_snake_case : Optional[int]=[2, 2, 2, 2] ,_snake_case : List[str]=[3, 3, 3, 3] ,_snake_case : List[str]="gelu" ,_snake_case : Dict=4.0 ,_snake_case : str=0.0 ,_snake_case : Any=0.1 ,_snake_case : Dict=False ,_snake_case : List[Any]=1e-4 ,_snake_case : Dict=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[Any]=False ,_snake_case : Dict=0.02 ,_snake_case : Optional[int]=1e-5 ,_snake_case : Optional[int]=32 ,_snake_case : Any=None ,_snake_case : List[Any]=None ,**_snake_case : List[Any] ,) -> Tuple:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : Optional[Any] = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = embed_dim
lowercase__ : List[str] = use_conv_embed
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : Dict = depths
lowercase__ : List[Any] = focal_levels
lowercase__ : Tuple = focal_windows
lowercase__ : str = hidden_act
lowercase__ : Tuple = mlp_ratio
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : str = drop_path_rate
lowercase__ : str = use_layerscale
lowercase__ : List[Any] = layerscale_value
lowercase__ : Optional[Any] = use_post_layernorm
lowercase__ : str = use_post_layernorm_in_modulation
lowercase__ : Dict = normalize_modulator
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = layer_norm_eps
lowercase__ : Any = encoder_stride
lowercase__ : int = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )]
lowercase__ , lowercase__ : Tuple = get_aligned_output_features_output_indices(
out_features=_snake_case ,out_indices=_snake_case ,stage_names=self.stage_names )
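# Hedged usage sketch: building a randomly initialised model from this config
# through the public transformers API (class names assumed from the library,
# not from the dump above).
from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2])
model = FocalNetModel(config)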
| code_codestyle: 16 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class LauncherTester(unittest.TestCase ):
    def test_launch_script(self ) -> None:
        debug_launcher(test_script.main )

    def test_launch_ops(self ) -> None:
        debug_launcher(test_ops.main )
| style_context_codestyle: 16 | label: 1 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase_ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase_ = '</w>'
lowerCAmelCase_ = '@@ '
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Any = set()
lowercase__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ : Any = char
return pairs
# Speech2Text2 has no max input length
lowerCAmelCase_ = {'facebook/s2t-wav2vec2-large-en-de': 1_024}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self : int ,_snake_case : Union[str, Any] ,_snake_case : str="<s>" ,_snake_case : List[Any]="<pad>" ,_snake_case : int="</s>" ,_snake_case : str="<unk>" ,_snake_case : str=False ,_snake_case : List[Any]=None ,**_snake_case : Tuple ,) -> int:
"""simple docstring"""
super().__init__(
unk_token=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,pad_token=_snake_case ,do_lower_case=_snake_case ,**_snake_case ,)
lowercase__ : Optional[int] = do_lower_case
with open(_snake_case ,encoding='''utf-8''' ) as vocab_handle:
lowercase__ : List[str] = json.load(_snake_case )
lowercase__ : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
lowercase__ : Union[str, Any] = None
lowercase__ : int = None
else:
with open(_snake_case ,encoding='''utf-8''' ) as merges_handle:
lowercase__ : Tuple = merges_handle.read().split('''\n''' )[:-1]
lowercase__ : Optional[int] = [tuple(merge.split()[:2] ) for merge in merges]
lowercase__ : Tuple = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
lowercase__ : Any = {}
@property
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return len(self.decoder )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Union[str, Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowercase__ : List[str] = get_pairs(_snake_case )
if not pairs:
return token
while True:
lowercase__ : List[str] = min(_snake_case ,key=lambda _snake_case : self.bpe_ranks.get(_snake_case ,float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ : Tuple = bigram
lowercase__ : int = []
lowercase__ : int = 0
while i < len(_snake_case ):
try:
lowercase__ : Any = word.index(_snake_case ,_snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ : Tuple = j
if word[i] == first and i < len(_snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : List[Any] = tuple(_snake_case )
lowercase__ : Dict = new_word
if len(_snake_case ) == 1:
break
else:
lowercase__ : List[Any] = get_pairs(_snake_case )
lowercase__ : Optional[Any] = ''' '''.join(_snake_case )
if word == "\n " + BPE_TOKEN_MERGES:
lowercase__ : Union[str, Any] = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(_snake_case ):
lowercase__ : Optional[int] = word.replace(_snake_case ,'''''' )
lowercase__ : Union[str, Any] = word.replace(''' ''' ,_snake_case )
lowercase__ : Optional[Any] = word
return word
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding. '''
                '''Make sure to provide a `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
if self.do_lower_case:
lowercase__ : Tuple = text.lower()
lowercase__ : Optional[Any] = text.split()
lowercase__ : Dict = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_snake_case ).split(''' ''' ) ) )
return split_tokens
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : str ) -> int:
"""simple docstring"""
return self.encoder.get(_snake_case ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self : Dict ,_snake_case : int ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = self.decoder.get(_snake_case ,self.unk_token )
return result
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ) -> str:
"""simple docstring"""
lowercase__ : Optional[int] = ''' '''.join(_snake_case )
# make sure @@ tokens are concatenated
lowercase__ : Tuple = ''''''.join(string.split(_snake_case ) )
return string
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : str = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : int = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_snake_case ,ensure_ascii=_snake_case ) + '''\n''' )
lowercase__ : List[str] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _snake_case : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowercase__ : Tuple = token_index
writer.write(''' '''.join(_snake_case ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
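# Usage sketch, assuming the class above corresponds to transformers'
# Speech2Text2Tokenizer (class and variable names in this file are obfuscated).
# The checkpoint is the one referenced in the vocab/merges maps above; loading
# it requires Hub access.
from transformers import Speech2Text2Tokenizer

_demo_tok = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
_demo_ids = _demo_tok("das ist gut").input_ids  # BPE-encodes with "@@ " continuations
print(_demo_tok.decode(_demo_ids))              # should round-trip the input text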
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
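# How the lazy module resolves in practice (a sketch): the names declared in
# _import_structure above are importable as usual, and the underlying submodule
# is only loaded on first access.
from transformers.models.speecht5 import SpeechT5Config

_demo_cfg = SpeechT5Config()   # default construction needs no checkpoint
print(_demo_cfg.model_type)    # -> "speecht5"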
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCAmelCase_ = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase : List[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCAmelCase : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCAmelCase : Optional[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def UpperCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = pipeline(
task='''text-classification''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,framework='''pt''' )
lowercase__ : Union[str, Any] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''LABEL_0''', '''score''': 0.504}] )
lowercase__ : List[Any] = text_classifier('''This is great !''' ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ) ,[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )
lowercase__ : Tuple = text_classifier(['''This is great !''', '''This is bad'''] ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ) ,[
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] ,)
lowercase__ : Tuple = text_classifier('''This is great !''' ,top_k=1 )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''LABEL_0''', '''score''': 0.504}] )
# Legacy behavior
lowercase__ : Dict = text_classifier('''This is great !''' ,return_all_scores=_snake_case )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''LABEL_0''', '''score''': 0.504}] )
lowercase__ : Optional[int] = text_classifier('''This is great !''' ,return_all_scores=_snake_case )
self.assertEqual(
nested_simplify(_snake_case ) ,[[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )
lowercase__ : Dict = text_classifier(['''This is great !''', '''Something else'''] ,return_all_scores=_snake_case )
self.assertEqual(
nested_simplify(_snake_case ) ,[
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] ,)
lowercase__ : Dict = text_classifier(['''This is great !''', '''Something else'''] ,return_all_scores=_snake_case )
self.assertEqual(
nested_simplify(_snake_case ) ,[
{'''label''': '''LABEL_0''', '''score''': 0.504},
{'''label''': '''LABEL_0''', '''score''': 0.504},
] ,)
@require_torch
def UpperCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
import torch
lowercase__ : List[str] = pipeline(
task='''text-classification''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,framework='''pt''' ,device=torch.device('''cpu''' ) ,)
lowercase__ : Optional[Any] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@require_tf
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = pipeline(
task='''text-classification''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,framework='''tf''' )
lowercase__ : Tuple = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@slow
@require_torch
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = pipeline('''text-classification''' )
lowercase__ : List[Any] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowercase__ : Optional[int] = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowercase__ : Union[str, Any] = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''POSITIVE''', '''score''': 0.988}] )
@slow
@require_tf
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
lowercase__ : str = pipeline('''text-classification''' ,framework='''tf''' )
lowercase__ : Union[str, Any] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowercase__ : int = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowercase__ : List[Any] = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': '''POSITIVE''', '''score''': 0.988}] )
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ,_snake_case : Union[str, Any] ,_snake_case : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = TextClassificationPipeline(model=_snake_case ,tokenizer=_snake_case )
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : str ) -> str:
"""simple docstring"""
lowercase__ : int = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowercase__ : int = '''HuggingFace is in'''
lowercase__ : List[Any] = text_classifier(_snake_case )
self.assertEqual(nested_simplify(_snake_case ) ,[{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
lowercase__ : Union[str, Any] = ['''HuggingFace is in ''', '''Paris is in France''']
lowercase__ : Optional[int] = text_classifier(_snake_case )
self.assertEqual(
nested_simplify(_snake_case ) ,[{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}, {'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}] ,)
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowercase__ : Dict = text_classifier(_snake_case ,top_k=_snake_case )
lowercase__ : List[str] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_snake_case ) ,[[{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}] * N, [{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}] * N] ,)
lowercase__ : Union[str, Any] = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
lowercase__ : Optional[int] = text_classifier(_snake_case )
self.assertEqual(
nested_simplify(_snake_case ) ,{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )} ,)
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
lowercase__ : List[str] = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(_snake_case ):
text_classifier(_snake_case )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowercase__ : Dict = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(_snake_case ) ,[{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}] ,)
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
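# Minimal out-of-test usage sketch for the pipeline exercised above (the tiny
# hf-internal-testing checkpoint is taken from the tests; loading it needs Hub
# access). `pipeline` is already imported at the top of this file.
_demo_clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(_demo_clf("This is great !", top_k=2))  # two {label, score} dicts, scores sum to ~1.0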
| 16 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["torch", "torchsde"]
def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
| 16 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
self.test()
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Tuple = 0
lowercase__ : Dict = False
while not completed:
if counter == 1:
self.reset()
lowercase__ : int = self.advance()
if not self.does_advance(_snake_case ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
lowercase__ , lowercase__ , lowercase__ : Any = self.update(_snake_case )
counter += 1
if counter > 10_000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : int ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : int ) -> Dict:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCAmelCase ( self : List[str] ,_snake_case : Union[str, Any]=False ) -> Dict:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : List[int] ) -> Any:
"""simple docstring"""
super(_snake_case ,self ).__init__()
if not isinstance(_snake_case ,_snake_case ) or len(_snake_case ) == 0:
raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_snake_case ,_snake_case ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
lowercase__ : List[str] = token_ids
lowercase__ : Tuple = len(self.token_ids )
lowercase__ : Tuple = -1 # the index of the currently fulfilled step
lowercase__ : List[Any] = False
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase ( self : Any ,_snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(_snake_case )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(_snake_case )}""" )
lowercase__ : Optional[int] = False
lowercase__ : List[Any] = False
lowercase__ : str = False
if self.does_advance(_snake_case ):
self.fulfilled_idx += 1
lowercase__ : Union[str, Any] = True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase__ : Optional[int] = True
lowercase__ : str = completed
else:
# failed to make progress.
lowercase__ : List[Any] = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = False
lowercase__ : Any = 0
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Tuple=False ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = PhrasalConstraint(self.token_ids )
if stateful:
lowercase__ : Union[str, Any] = self.seqlen
lowercase__ : Tuple = self.fulfilled_idx
lowercase__ : Tuple = self.completed
return new_constraint
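# Step-through sketch, assuming the class above mirrors transformers'
# PhrasalConstraint (class names here are obfuscated, so the real export is
# imported instead). Feeding the ids in order completes on the last one:
from transformers import PhrasalConstraint

_demo_pc = PhrasalConstraint([5, 6, 7])
for _tid in (5, 6, 7):
    _stepped, _completed, _reset = _demo_pc.update(_tid)
print(_completed)  # -> True: the whole phrase has been consumed in order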
class __A :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : List[List[int]] ,_snake_case : int=True ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = max([len(_snake_case ) for one in nested_token_ids] )
lowercase__ : List[str] = {}
for token_ids in nested_token_ids:
lowercase__ : Optional[int] = root
for tidx, token_id in enumerate(_snake_case ):
if token_id not in level:
lowercase__ : str = {}
lowercase__ : str = level[token_id]
if no_subsets and self.has_subsets(_snake_case ,_snake_case ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f""" {nested_token_ids}.""" )
lowercase__ : int = root
def UpperCAmelCase ( self : List[Any] ,_snake_case : Tuple ) -> List[str]:
"""simple docstring"""
lowercase__ : Any = self.trie
for current_token in current_seq:
lowercase__ : int = start[current_token]
lowercase__ : Union[str, Any] = list(start.keys() )
return next_tokens
def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = self.next_tokens(_snake_case )
return len(_snake_case ) == 0
def UpperCAmelCase ( self : str ,_snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[str] = list(root.values() )
if len(_snake_case ) == 0:
return 1
else:
return sum([self.count_leaves(_snake_case ) for nn in next_nodes] )
def UpperCAmelCase ( self : int ,_snake_case : Tuple ,_snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.count_leaves(_snake_case )
return len(_snake_case ) != leaf_count
class __A ( A_ ):
'''simple docstring'''
def __init__( self : str ,_snake_case : List[List[int]] ) -> List[str]:
"""simple docstring"""
super(_snake_case ,self ).__init__()
if not isinstance(_snake_case ,_snake_case ) or len(_snake_case ) == 0:
raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_snake_case ,_snake_case ) for token_ids in nested_token_ids ):
raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_snake_case ,_snake_case ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
lowercase__ : Tuple = DisjunctiveTrie(_snake_case )
lowercase__ : Tuple = nested_token_ids
lowercase__ : str = self.trie.max_height
lowercase__ : int = []
lowercase__ : int = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = self.trie.next_tokens(self.current_seq )
if len(_snake_case ) == 0:
return None
else:
return token_list
def UpperCAmelCase ( self : List[str] ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_snake_case )}""" )
lowercase__ : List[Any] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCAmelCase ( self : Any ,_snake_case : int ) -> Tuple:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_snake_case )}""" )
lowercase__ : List[str] = False
lowercase__ : Tuple = False
lowercase__ : List[Any] = False
if self.does_advance(_snake_case ):
self.current_seq.append(_snake_case )
lowercase__ : Optional[Any] = True
else:
lowercase__ : str = True
self.reset()
lowercase__ : Any = self.trie.reached_leaf(self.current_seq )
lowercase__ : Dict = completed
return stepped, completed, reset
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[int] = False
lowercase__ : Dict = []
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple=False ) -> Dict:
"""simple docstring"""
lowercase__ : List[str] = DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase__ : Any = self.seqlen
lowercase__ : Tuple = self.current_seq
lowercase__ : int = self.completed
return new_constraint
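# Sketch for the disjunctive case, assuming the class above mirrors
# transformers' DisjunctiveConstraint: satisfying any one of the nested
# phrases completes the constraint, tracked by the trie built above.
from transformers import DisjunctiveConstraint

_demo_dc = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
_demo_dc.update(1)
_, _done, _ = _demo_dc.update(4)  # the shorter branch [1, 4] reaches a leaf
print(_done)                      # -> True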
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : List[Constraint] ) -> int:
"""simple docstring"""
lowercase__ : str = constraints
# max # of steps required to fulfill a given constraint
lowercase__ : str = max([c.seqlen for c in constraints] )
lowercase__ : Any = len(_snake_case )
lowercase__ : Optional[Any] = False
self.init_state()
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : str = []
lowercase__ : Tuple = None
lowercase__ : Optional[Any] = [constraint.copy(stateful=_snake_case ) for constraint in self.constraints]
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[Any] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase__ : Tuple = constraint.advance()
if isinstance(_snake_case ,_snake_case ):
token_list.append(_snake_case )
elif isinstance(_snake_case ,_snake_case ):
token_list.extend(_snake_case )
else:
lowercase__ : List[Any] = self.inprogress_constraint.advance()
if isinstance(_snake_case ,_snake_case ):
token_list.append(_snake_case )
elif isinstance(_snake_case ,_snake_case ):
token_list.extend(_snake_case )
if len(_snake_case ) == 0:
return None
else:
return token_list
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[List[int]] ) -> Optional[int]:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase__ , lowercase__ : Dict = self.add(_snake_case )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : int ) -> int:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" )
lowercase__ , lowercase__ : Union[str, Any] = False, False
if self.completed:
lowercase__ : List[Any] = True
lowercase__ : Union[str, Any] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
lowercase__ , lowercase__ , lowercase__ : Dict = self.inprogress_constraint.update(_snake_case )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_snake_case ) )
lowercase__ : str = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowercase__ : Union[str, Any] = None
if len(self.pending_constraints ) == 0:
# we're done!
lowercase__ : List[str] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_snake_case ):
lowercase__ , lowercase__ , lowercase__ : Dict = pending_constraint.update(_snake_case )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_snake_case )
lowercase__ : str = None
if not complete and stepped:
lowercase__ : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase__ : int = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase__ : Any = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCAmelCase ( self : List[Any] ,_snake_case : str=True ) -> Union[str, Any]:
"""simple docstring"""
        lowercase__ : Any = ConstraintListState(self.constraints )  # we never actually touch the self.constraints objects
        # throughout this process, so the copy starts from its initialization state.
if stateful:
lowercase__ : Optional[int] = [
constraint.copy(stateful=_snake_case ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase__ : List[Any] = self.inprogress_constraint.copy(stateful=_snake_case )
lowercase__ : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
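# Bookkeeping sketch for the state tracker above. Assumption: this class
# corresponds to ConstraintListState in transformers' generation internals
# (not re-exported at the top level, so the module path below is an assumption).
from transformers.generation.beam_constraints import ConstraintListState as _RealCLS

_demo_state = _RealCLS([PhrasalConstraint([5, 6])])
_demo_state.reset([5])        # replay the tokens generated so far
print(_demo_state.advance())  # -> [6], the only id that makes progress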
| 16 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase_ = 4
lowerCAmelCase_ = 3
class __A ( A_ ):
'''simple docstring'''
pass
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
for shard in shards:
for i in range(__lowerCamelCase ):
yield {"i": i, "shard": shard}
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : int = int(os.environ['''RANK'''] )
lowercase__ : str = int(os.environ['''WORLD_SIZE'''] )
lowercase__ : List[Any] = ArgumentParser()
parser.add_argument('''--streaming''' , type=__lowerCamelCase )
parser.add_argument('''--local_rank''' , type=__lowerCamelCase )
parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 )
lowercase__ : int = parser.parse_args()
lowercase__ : Optional[Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]}
lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase )
if not streaming:
lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) )
lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase )
lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase )
lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : str = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowercase__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
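# Shard-splitting sketch outside a launcher (rank/world_size are set by hand
# here; under torchrun they arrive via the RANK/WORLD_SIZE env vars that
# main() reads). Uses the imports already at the top of this file.
_demo_ds = Dataset.from_dict({"i": list(range(12))})
_demo_shard = split_dataset_by_node(_demo_ds, rank=0, world_size=3)
assert len(_demo_shard) == 4  # 12 rows split evenly across 3 ranks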
| 16 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "tapas"
def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,**_snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[Any] = intermediate_size
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : Dict = type_vocab_sizes
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase__ : Any = positive_label_weight
lowercase__ : int = num_aggregation_labels
lowercase__ : List[str] = aggregation_loss_weight
lowercase__ : Optional[int] = use_answer_as_supervision
lowercase__ : Optional[Any] = answer_loss_importance
lowercase__ : Union[str, Any] = use_normalized_answer_loss
lowercase__ : str = huber_loss_delta
lowercase__ : str = temperature
lowercase__ : int = aggregation_temperature
lowercase__ : List[Any] = use_gumbel_for_cells
lowercase__ : Tuple = use_gumbel_for_aggregation
lowercase__ : Union[str, Any] = average_approximation_function
lowercase__ : Union[str, Any] = cell_selection_preference
lowercase__ : Any = answer_loss_cutoff
lowercase__ : List[Any] = max_num_rows
lowercase__ : str = max_num_columns
lowercase__ : int = average_logits_per_cell
lowercase__ : str = select_one_column
lowercase__ : str = allow_empty_column_selection
lowercase__ : Any = init_cell_selection_weights_to_zero
lowercase__ : Optional[int] = reset_position_index_per_cell
lowercase__ : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
lowercase__ : Optional[Any] = aggregation_labels
lowercase__ : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels ,_snake_case ):
lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
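# Construction sketch, assuming this class mirrors transformers' TapasConfig
# (the WTQ-style keyword choices below come straight from the signature above):
from transformers import TapasConfig as _RealTapasConfig

_demo_tapas = _RealTapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
print(_demo_tapas.aggregation_labels)  # None unless explicitly provided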
| 16 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "tapas"
def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,**_snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[Any] = intermediate_size
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : Dict = type_vocab_sizes
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase__ : Any = positive_label_weight
lowercase__ : int = num_aggregation_labels
lowercase__ : List[str] = aggregation_loss_weight
lowercase__ : Optional[int] = use_answer_as_supervision
lowercase__ : Optional[Any] = answer_loss_importance
lowercase__ : Union[str, Any] = use_normalized_answer_loss
lowercase__ : str = huber_loss_delta
lowercase__ : str = temperature
lowercase__ : int = aggregation_temperature
lowercase__ : List[Any] = use_gumbel_for_cells
lowercase__ : Tuple = use_gumbel_for_aggregation
lowercase__ : Union[str, Any] = average_approximation_function
lowercase__ : Union[str, Any] = cell_selection_preference
lowercase__ : Any = answer_loss_cutoff
lowercase__ : List[Any] = max_num_rows
lowercase__ : str = max_num_columns
lowercase__ : int = average_logits_per_cell
lowercase__ : str = select_one_column
lowercase__ : str = allow_empty_column_selection
lowercase__ : Any = init_cell_selection_weights_to_zero
lowercase__ : Optional[int] = reset_position_index_per_cell
lowercase__ : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
lowercase__ : Optional[Any] = aggregation_labels
lowercase__ : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels ,_snake_case ):
lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
| 16 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase_ = 4
lowerCAmelCase_ = 3
class __A ( A_ ):
'''simple docstring'''
pass
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
for shard in shards:
for i in range(__lowerCamelCase ):
yield {"i": i, "shard": shard}
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : int = int(os.environ['''RANK'''] )
lowercase__ : str = int(os.environ['''WORLD_SIZE'''] )
lowercase__ : List[Any] = ArgumentParser()
parser.add_argument('''--streaming''' , type=__lowerCamelCase )
parser.add_argument('''--local_rank''' , type=__lowerCamelCase )
parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 )
lowercase__ : int = parser.parse_args()
lowercase__ : Optional[Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]}
lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase )
if not streaming:
lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) )
lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase )
lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase )
lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : str = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowercase__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 16 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape
lowercase__ : int = (
reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
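# A minimal, self-contained sketch (illustrative numbers only) of the pad-to-multiple
# computation used in the hidden-states-with-padding test above: an image dimension
# that is not a multiple of the patch size is grown by patch - (dimension % patch).
image_dim, patch_dim = 30, 4
padded_dim = image_dim + patch_dim - (image_dim % patch_dim)
assert padded_dim == 32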
| 16 | 1 |
"""simple docstring"""
from statistics import mean
import numpy as np
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> list:
lowercase__ : Union[str, Any] = 0
# Number of processes finished
lowercase__ : Optional[Any] = 0
    # Tracks whether each process has finished:
    # 0 means the process has not completed yet, 1 means it has finished.
lowercase__ : Union[str, Any] = [0] * no_of_process
    # List to hold the computed turn around times
lowercase__ : List[Any] = [0] * no_of_process
# Sort by arrival time.
lowercase__ : int = [burst_time[i] for i in np.argsort(__lowerCamelCase )]
lowercase__ : str = [process_name[i] for i in np.argsort(__lowerCamelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
lowercase__ : List[Any] = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
lowercase__ : str = arrival_time[i]
lowercase__ : Optional[Any] = 0
# Index showing the location of the process being performed
lowercase__ : Optional[int] = 0
# Saves the current response ratio.
lowercase__ : str = 0
for i in range(0 , __lowerCamelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
lowercase__ : Tuple = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
lowercase__ : int = temp
lowercase__ : Optional[Any] = i
# Calculate the turn around time
lowercase__ : List[Any] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
        # Mark the process as finished.
lowercase__ : List[Any] = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
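# A minimal sketch (hypothetical helper, not part of the record above) of the HRRN
# selection rule used in the loop: response ratio = (waiting_time + burst_time) / burst_time,
# and the process with the highest ratio runs next. A job that has waited 6 units with a
# burst of 3 (ratio 3.0) beats one that has waited 2 units with a burst of 4 (ratio 1.5).
def _response_ratio_sketch(waiting_time: float, burst_time: float) -> float:
    return (waiting_time + burst_time) / burst_time

assert _response_ratio_sketch(6, 3) > _response_ratio_sketch(2, 4)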
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> list:
lowercase__ : str = [0] * no_of_process
for i in range(0 , __lowerCamelCase ):
lowercase__ : str = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
lowerCAmelCase_ = 5
lowerCAmelCase_ = ['A', 'B', 'C', 'D', 'E']
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCAmelCase_ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 16 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict:
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]:
"""simple docstring"""
if NLTK_VERSION >= version.Version('''3.6.5''' ):
lowercase__ : int = [
meteor_score.single_meteor_score(
word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
else:
lowercase__ : Tuple = [
meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
return {"meteor": np.mean(_snake_case )}
| 16 | 1 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = {}
if "candidate_labels" in kwargs:
lowercase__ : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowercase__ : Optional[Any] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = load_image(_snake_case )
lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework )
lowercase__ : str = candidate_labels
lowercase__ : Dict = [hypothesis_template.format(_snake_case ) for x in candidate_labels]
lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case )
lowercase__ : Optional[int] = [text_inputs]
return inputs
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' )
lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] ,_snake_case ):
lowercase__ : List[str] = text_inputs[0]
else:
# Batching case.
lowercase__ : int = text_inputs[0][0]
lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case )
lowercase__ : Union[str, Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Dict = model_outputs.pop('''candidate_labels''' )
lowercase__ : Optional[Any] = model_outputs['''logits'''][0]
if self.framework == "pt":
lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__ : Tuple = probs.tolist()
if not isinstance(_snake_case ,_snake_case ):
lowercase__ : Any = [scores]
elif self.framework == "tf":
lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 )
lowercase__ : Optional[Any] = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
lowercase__ : Union[str, Any] = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda x : -x[0] )
]
return result
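# A hedged usage sketch for the pipeline class above, assuming the standard
# `transformers.pipeline` factory and the public `openai/clip-vit-base-patch32`
# checkpoint; the image URL and candidate labels are illustrative assumptions.
from transformers import pipeline

classifier = pipeline('zero-shot-image-classification', model='openai/clip-vit-base-patch32')
predictions = classifier(
    'http://images.cocodataset.org/val2017/000000039769.jpg',
    candidate_labels=['two cats', 'a dog', 'an airplane'],
)
# `predictions` is a list of {'score': ..., 'label': ...} dicts sorted by descending score.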
| 16 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCAmelCase_ = {
'facebook/xglm-564M': 2_048,
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None:
"""simple docstring"""
lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase__ : Any = 7
lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
lowercase__ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[int] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase__ : List[str] = len(self.sp_model )
lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_snake_case )
lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = self.__dict__.copy()
lowercase__ : Optional[int] = None
lowercase__ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : int = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : Dict = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase__ : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case ))
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case ))
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_snake_case ,out_type=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip()
return out_string
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Any = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,'''wb''' ) as fi:
lowercase__ : Dict = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
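# A minimal sketch of the fairseq/sentencepiece id alignment documented in __init__
# above: the first four fairseq ids are pinned by hand, and every other sentencepiece
# id is shifted by fairseq_offset = 1. The ids below follow the comment table, not a
# live tokenizer call.
fairseq_offset = 1
fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
spm_id_for_comma = 3  # ',' in the spm vocab per the table above
assert spm_id_for_comma + fairseq_offset == 4  # ',' in the fairseq vocab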
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class __A :
'''simple docstring'''
lowerCAmelCase : List[Any] = MBartConfig
lowerCAmelCase : int = {}
lowerCAmelCase : Optional[int] = "gelu"
def __init__( self : str ,_snake_case : Any ,_snake_case : Optional[int]=13 ,_snake_case : str=7 ,_snake_case : Any=True ,_snake_case : Union[str, Any]=False ,_snake_case : List[str]=99 ,_snake_case : Any=32 ,_snake_case : Optional[int]=2 ,_snake_case : Dict=4 ,_snake_case : str=37 ,_snake_case : Optional[int]=0.1 ,_snake_case : Dict=0.1 ,_snake_case : List[str]=20 ,_snake_case : Optional[Any]=2 ,_snake_case : Optional[Any]=1 ,_snake_case : Optional[int]=0 ,) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Union[str, Any] = seq_length
lowercase__ : Tuple = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Any = vocab_size
lowercase__ : Tuple = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : Any = hidden_dropout_prob
lowercase__ : Optional[int] = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : Union[str, Any] = eos_token_id
lowercase__ : List[Any] = pad_token_id
lowercase__ : Tuple = bos_token_id
def UpperCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
lowercase__ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
lowercase__ : Optional[int] = tf.concat([input_ids, eos_tensor] ,axis=1 )
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
lowercase__ : Tuple = prepare_mbart_inputs_dict(_snake_case ,_snake_case ,_snake_case )
return config, inputs_dict
def UpperCAmelCase ( self : int ,_snake_case : Dict ,_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = TFMBartModel(config=_snake_case ).get_decoder()
lowercase__ : List[str] = inputs_dict['''input_ids''']
lowercase__ : Dict = input_ids[:1, :]
lowercase__ : Any = inputs_dict['''attention_mask'''][:1, :]
lowercase__ : List[Any] = inputs_dict['''head_mask''']
lowercase__ : List[Any] = 1
# first forward pass
lowercase__ : Any = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case )
lowercase__ , lowercase__ : int = outputs.to_tuple()
lowercase__ : Dict = past_key_values[1]
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> int:
if attention_mask is None:
lowercase__ : int = tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase__ : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase__ : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase__ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase__ : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
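# A minimal sketch of the masking rule used in prepare_mbart_inputs_dict above,
# assuming pad_token_id = 1: positions holding the pad id get mask 0, all other
# positions get 1. The input ids are illustrative only.
import tensorflow as tf

example_input_ids = tf.constant([[5, 7, 1, 1]])
example_attention_mask = tf.cast(tf.math.not_equal(example_input_ids, 1), tf.int8)
# example_attention_mask is [[1, 1, 0, 0]]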
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowerCAmelCase : Any = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase : Optional[Any] = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : str = False
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ,_snake_case : Dict ,_snake_case : int ,_snake_case : List[Any] ,_snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
lowercase__ : Dict = TFMBartModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self ,config_class=_snake_case )
def UpperCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_sentencepiece
@require_tokenizers
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
]
lowerCAmelCase : Dict = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
lowerCAmelCase : str = "facebook/mbart-large-en-ro"
@cached_property
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCAmelCase ( self : List[Any] ,**_snake_case : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = self.translate_src_text(**_snake_case )
self.assertListEqual(self.expected_text ,_snake_case )
def UpperCAmelCase ( self : Any ,**_snake_case : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : str = self.tokenizer(self.src_text ,**_snake_case ,return_tensors='''tf''' )
lowercase__ : Dict = self.model.generate(
model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 )
lowercase__ : List[str] = self.tokenizer.batch_decode(_snake_case ,skip_special_tokens=_snake_case )
return generated_words
@slow
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]:
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase )
else:
lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase )
if hidden_sizes == 1_92:
lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase )
if hidden_sizes == 2_56:
lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase )
if hidden_sizes == 3_84:
lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase )
from_model.eval()
lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval()
lowercase__ : str = OrderedDict()
lowercase__ : int = from_model.state_dict()
lowercase__ : Dict = list(from_model.state_dict().keys() )
lowercase__ : Any = list(our_model.state_dict().keys() )
print(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowerCamelCase )
lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) )
lowercase__ : Optional[int] = from_model(__lowerCamelCase )
lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one."
lowercase__ : Any = name
print(__lowerCamelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase__ : int = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]:
lowercase__ : Any = '''imagenet-1k-id2label.json'''
lowercase__ : Tuple = 10_00
lowercase__ : Dict = (1, num_labels)
lowercase__ : List[str] = '''huggingface/label-files'''
lowercase__ : str = num_labels
lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Union[str, Any] = idalabel
lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()}
lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
lowercase__ : Tuple = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
lowercase__ : Any = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['LayoutLMv2FeatureExtractor']
lowerCAmelCase_ = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
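# A toy sketch of the lazy-import pattern above, assuming only the standard library:
# names are registered up front and the owning submodule is imported on first
# attribute access instead of at package import time.
import importlib

class _LazyModuleSketch:
    def __init__(self, package, name_to_submodule):
        self._package = package
        # e.g. {'LayoutLMv2Config': 'configuration_layoutlmv2'}
        self._name_to_submodule = name_to_submodule

    def __getattr__(self, name):
        submodule = importlib.import_module('.' + self._name_to_submodule[name], self._package)
        return getattr(submodule, name)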
| 16 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : List[str]
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="Translation" ,init=A_ ,repr=A_ )
def __call__( self : List[str] ) -> Any:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : Optional[List] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="TranslationVariableLanguages" ,init=A_ ,repr=A_ )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = sorted(set(self.languages ) ) if self.languages else None
lowercase__ : Dict = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = set(self.languages )
if self.languages and set(_snake_case ) - lang_set:
raise ValueError(
f"""Some languages in example ({", ".join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({", ".join(_snake_case )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowercase__ : str = []
for lang, text in translation_dict.items():
if isinstance(_snake_case ,_snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowercase__ , lowercase__ : Optional[Any] = zip(*sorted(_snake_case ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
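# A worked, self-contained sketch of the flattening performed above: multiple
# translations for one language are split into tuples, then re-assembled as parallel,
# language-sorted lists. The example sentences are illustrative only.
translation_dict = {'fr': ['le chat', 'un chat'], 'en': 'the cat'}
translation_tuples = []
for lang, text in translation_dict.items():
    if isinstance(text, str):
        translation_tuples.append((lang, text))
    else:
        translation_tuples.extend((lang, el) for el in text)
languages, translations = zip(*sorted(translation_tuples))
assert languages == ('en', 'fr', 'fr')
assert translations == ('the cat', 'le chat', 'un chat')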
| 16 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase = 1_00 ) -> int:
lowercase__ : Optional[Any] = n * (n + 1) * (2 * n + 1) / 6
lowercase__ : List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
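# A brute-force cross-check (illustrative n only) of the closed forms used above:
# the sum of squares is n(n+1)(2n+1)/6 and the square of the sum is (n(n+1)/2)**2.
n = 10
assert sum(i * i for i in range(1, n + 1)) == n * (n + 1) * (2 * n + 1) // 6
assert sum(range(1, n + 1)) ** 2 == (n * (n + 1) // 2) ** 2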
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : int = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : str = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : int = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : List[Any] = 2
# Initialize accelerator
lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : str = config['''lr''']
lowercase__ : str = int(config['''num_epochs'''] )
lowercase__ : Optional[int] = int(config['''seed'''] )
lowercase__ : Tuple = int(config['''batch_size'''] )
lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : Dict = model(**__lowerCamelCase )
lowercase__ : List[Any] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Any = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
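# A minimal sketch of the decorator's retry behaviour, assuming (as accelerate
# documents) that find_executable_batch_size halves the batch size and re-invokes the
# wrapped function whenever it raises a CUDA out-of-memory error; the failing
# threshold below is artificial.
@find_executable_batch_size(starting_batch_size=128)
def _oom_sketch(batch_size):
    if batch_size > 32:
        raise RuntimeError('CUDA out of memory.')  # simulated OOM
    return batch_size

# _oom_sketch() would retry at 128 and 64, then succeed at 32.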
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
| 16 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def __UpperCAmelCase ( __lowerCamelCase = None ) -> int:
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
lowercase__ : Tuple = nums[0]
for i in range(1 , len(__lowerCamelCase ) ):
lowercase__ : str = nums[i]
lowercase__ : Tuple = max(__lowerCamelCase , ans + num , __lowerCamelCase )
return ans
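# A worked, self-contained trace of the running-maximum recurrence above on
# [-2, 1, -3, 4, -1, 2, 1]: the best sum ending at each index is
# max(num, best_ending_here + num), and the answer is the maximum over all
# positions, here 4 - 1 + 2 + 1 = 6.
nums = [-2, 1, -3, 4, -1, 2, 1]
best_ending_here = best_overall = nums[0]
for num in nums[1:]:
    best_ending_here = max(num, best_ending_here + num)
    best_overall = max(best_overall, best_ending_here)
assert best_overall == 6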
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCAmelCase_ = int(input('Enter number of elements : ').strip())
lowerCAmelCase_ = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : Optional[int] = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : str = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple:
lowercase__ : List[str] = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def __UpperCAmelCase ( ) -> Optional[int]:
lowercase__ : List[str] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
lowercase__ : List[Any] = '''imagenet-1k-id2label.json'''
lowercase__ : Optional[Any] = 10_00
lowercase__ : Optional[Any] = '''huggingface/label-files'''
lowercase__ : Dict = num_labels
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
lowercase__ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : str = {v: k for k, v in idalabel.items()}
lowercase__ : Any = CvtConfig(num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
lowercase__ : int = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
lowercase__ : int = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : List[Any] = [2, 2, 20]
lowercase__ : Any = [3, 12, 16]
lowercase__ : Tuple = [1_92, 7_68, 10_24]
lowercase__ : List[Any] = CvtForImageClassification(__lowerCamelCase )
lowercase__ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
lowercase__ : List[str] = image_size
lowercase__ : Union[str, Any] = torch.load(__lowerCamelCase , map_location=torch.device('''cpu''' ) )
lowercase__ : int = OrderedDict()
lowercase__ : List[Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Any = list_of_state_dict + cls_token(__lowerCamelCase )
lowercase__ : Any = list_of_state_dict + embeddings(__lowerCamelCase )
for cnt in range(config.depth[idx] ):
lowercase__ : Tuple = list_of_state_dict + attention(__lowerCamelCase , __lowerCamelCase )
lowercase__ : List[Any] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowerCamelCase )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : Optional[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
image_processor.save_pretrained(__lowerCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
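# A minimal sketch (hypothetical key names and shapes) of the rename-pair remapping
# built above: each (hf_key, original_key) pair copies one tensor from the source
# checkpoint into a new state dict under the Hugging Face name.
import torch

example_original_weights = {'stage0.patch_embed.proj.weight': torch.zeros(4, 3, 7, 7)}
example_pairs = [
    ('cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight',
     'stage0.patch_embed.proj.weight')
]
example_remapped = {hf_key: example_original_weights[orig_key] for hf_key, orig_key in example_pairs}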
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original cvt checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
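    # Hypothetical CLI invocation (the script filename is illustrative, not from the source):
    #   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
    #       --cvt_model cvt-w24 --image_size 384 \
    #       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-w24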
| 16 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
while b:
lowercase__ , lowercase__ : str = b, a % b
return a
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__lowerCamelCase , a % b )
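# Worked example of the Euclidean algorithm these two helpers implement:
# gcd(48, 18): 48 % 18 = 12 -> 18 % 12 = 6 -> 12 % 6 = 0, so the gcd is 6.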
def __UpperCAmelCase ( ) -> Tuple:
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 16 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowercase__ : Tuple = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCamelCase )
# print(out)
number += 1
out += " "
return out
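# Example: called with number=1 and iterations=15, the function above returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (a trailing space is appended after every entry).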
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False ) -> str:
lowercase__ : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
for i in range(config.num_hidden_layers ):
lowercase__ : List[str] = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Tuple = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" )
lowercase__ : int = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : str = in_proj_weight[
: config.hidden_size, :
]
lowercase__ : Any = in_proj_bias[: config.hidden_size]
lowercase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : List[str] = in_proj_bias[-config.hidden_size :]
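# Note: the original checkpoint stores q, k and v as a single fused projection of shape
# (3 * hidden_size, hidden_size); the slices above split that matrix (and the fused bias
# of length 3 * hidden_size) into equal thirds for the separate query/key/value parameters.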
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : Tuple = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
lowercase__ : int = dct.pop(__lowerCamelCase )
lowercase__ : Optional[Any] = val
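# Example mappings applied through this helper with the rename table above:
#   "transformer.norm.weight" -> "vilt.layernorm.weight"
#   "transformer.blocks.0.norm1.weight" -> "vilt.encoder.layer.0.layernorm_before.weight"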
@torch.no_grad()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
lowercase__ : Tuple = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=__lowerCamelCase )
lowercase__ : Optional[Any] = False
lowercase__ : List[str] = False
lowercase__ : Union[str, Any] = False
lowercase__ : Optional[Any] = False
if "vqa" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[int] = 31_29
lowercase__ : Tuple = '''huggingface/label-files'''
lowercase__ : Tuple = '''vqa2-id2label.json'''
lowercase__ : Optional[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Any = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : int = idalabel
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = ViltForQuestionAnswering(__lowerCamelCase )
elif "nlvr" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : int = 2
lowercase__ : Any = {0: '''False''', 1: '''True'''}
lowercase__ : List[str] = {v: k for k, v in config.idalabel.items()}
lowercase__ : Dict = 3
lowercase__ : List[Any] = ViltForImagesAndTextClassification(__lowerCamelCase )
elif "irtr" in checkpoint_url:
lowercase__ : List[Any] = True
lowercase__ : Tuple = ViltForImageAndTextRetrieval(__lowerCamelCase )
elif "mlm_itm" in checkpoint_url:
lowercase__ : Dict = True
lowercase__ : int = ViltForMaskedLM(__lowerCamelCase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowercase__ : int = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location='''cpu''' )['''state_dict''']
lowercase__ : Optional[int] = create_rename_keys(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase , __lowerCamelCase )
if mlm_model or irtr_model:
lowercase__ : Dict = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase__ , lowercase__ : str = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__lowerCamelCase )
# Define processor
lowercase__ : Union[str, Any] = ViltImageProcessor(size=3_84 )
lowercase__ : Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ : Union[str, Any] = ViltProcessor(__lowerCamelCase , __lowerCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase__ : str = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowerCamelCase ).raw )
lowercase__ : Any = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowerCamelCase ).raw )
lowercase__ : Optional[Any] = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowercase__ : Any = processor(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
lowercase__ : Any = processor(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
lowercase__ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase__ : str = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__lowerCamelCase ).raw )
if mlm_model:
lowercase__ : Union[str, Any] = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowercase__ : Union[str, Any] = '''How many cats are there?'''
lowercase__ : int = processor(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
lowercase__ : Optional[int] = model(**__lowerCamelCase )
# Verify outputs
if mlm_model:
lowercase__ : str = torch.Size([1, 11, 3_05_22] )
lowercase__ : Dict = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __lowerCamelCase , atol=1E-4 )
# verify masked token prediction equals "cats"
lowercase__ : List[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase__ : Tuple = torch.Size([1, 31_29] )
lowercase__ : List[str] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 )
# verify vqa prediction equals "2"
lowercase__ : Optional[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase__ : Dict = torch.Size([1, 2] )
lowercase__ : Dict = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : Tuple = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Optional[Any] = len(_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
| 16 | 1 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
# A local function to see if a dot lands in the circle.
def is_in_circle(__lowerCamelCase , __lowerCamelCase ) -> bool:
lowercase__ : Union[str, Any] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
lowercase__ : Optional[Any] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
lowercase__ : Optional[int] = proportion * 4
print(f"""The estimated value of pi is {pi_estimate}""" )
print(f"""The numpy value of pi is {pi}""" )
print(f"""The total error is {abs(pi - pi_estimate )}""" )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , __lowerCamelCase = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(__lowerCamelCase , __lowerCamelCase ) ) for _ in range(__lowerCamelCase ) ) * (max_value - min_value)
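# Minimal usage sketch for the estimator above (the result is approximate by construction):
#   area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 3.0) ~= 9.0
# since the mean of x**2 under U(0, 3) is 3 and the interval length is 3.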
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 0.0 , __lowerCamelCase = 1.0 ) -> None:
def identity_function(__lowerCamelCase ) -> float:
return x
lowercase__ : List[Any] = area_under_curve_estimator(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {expected_value}""" )
print(f"""Total error is {abs(estimated_value - expected_value )}""" )
print('''******************''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> None:
def function_to_integrate(__lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
lowercase__ : int = area_under_curve_estimator(
__lowerCamelCase , __lowerCamelCase , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {pi}""" )
print(f"""Total error is {abs(estimated_value - pi )}""" )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
if "model" in orig_key:
lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
lowercase__ : str = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
lowercase__ : Optional[Any] = '''yoso.''' + orig_key
return orig_key
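# Hypothetical example of the rename chain above (the key name is illustrative):
#   "model.transformer_2.mha.W_q.weight" -> "yoso.encoder.layer.2.attention.self.query.weight"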
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowercase__ : Tuple = val
lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict''']
lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase )
lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase )
lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase )
print(model.load_state_dict(__lowerCamelCase ) )
model.eval()
model.save_pretrained(__lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __A :
'''simple docstring'''
@staticmethod
def UpperCAmelCase ( *_snake_case : Optional[int] ,**_snake_case : Optional[Any] ) -> int:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : str ,_snake_case : str ,_snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ : str = pipeline(
'''zero-shot-object-detection''' ,model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowercase__ : Optional[int] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = object_detector(examples[0] ,threshold=0.0 )
lowercase__ : Optional[int] = len(_snake_case )
self.assertGreater(_snake_case ,0 )
self.assertEqual(
_snake_case ,[
{
'''score''': ANY(_snake_case ),
'''label''': ANY(_snake_case ),
'''box''': {'''xmin''': ANY(_snake_case ), '''ymin''': ANY(_snake_case ), '''xmax''': ANY(_snake_case ), '''ymax''': ANY(_snake_case )},
}
for i in range(_snake_case )
] ,)
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
def UpperCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = pipeline(
'''zero-shot-object-detection''' ,model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowercase__ : str = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' ,candidate_labels=['''cat''', '''remote''', '''couch'''] ,threshold=0.64 ,)
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] ,)
lowercase__ : str = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] ,threshold=0.64 ,)
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] ,)
@require_torch
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = pipeline('''zero-shot-object-detection''' )
lowercase__ : Any = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' ,candidate_labels=['''cat''', '''remote''', '''couch'''] ,)
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] ,)
lowercase__ : int = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] ,)
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] ,)
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@require_torch
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = 0.2
lowercase__ : List[Any] = pipeline('''zero-shot-object-detection''' )
lowercase__ : Any = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' ,candidate_labels=['''cat''', '''remote''', '''couch'''] ,threshold=_snake_case ,)
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] ,)
@require_torch
@slow
def UpperCAmelCase ( self : str ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = 2
lowercase__ : int = pipeline('''zero-shot-object-detection''' )
lowercase__ : Union[str, Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' ,candidate_labels=['''cat''', '''remote''', '''couch'''] ,top_k=_snake_case ,)
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] ,)
| 16 |
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
    with open(os.path.dirname(__file__ ) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
lowercase__ : List[str] = 0
return total_score
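# Worked example from the problem statement: COLIN scores 3 + 15 + 12 + 9 + 14 = 53,
# and at position 938 in the sorted list contributes 938 * 53 = 49714 to the total.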
if __name__ == "__main__":
print(solution())
| 16 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowerCAmelCase_ = logging.getLogger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Tuple=-1 ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = label_idx
def UpperCAmelCase ( self : Any ,_snake_case : int ,_snake_case : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = mode.value
lowercase__ : Optional[Any] = os.path.join(_snake_case ,f"""{mode}.txt""" )
lowercase__ : Union[str, Any] = 1
lowercase__ : Tuple = []
with open(_snake_case ,encoding='''utf-8''' ) as f:
lowercase__ : Union[str, Any] = []
lowercase__ : Tuple = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" ,words=_snake_case ,labels=_snake_case ) )
guid_index += 1
lowercase__ : List[str] = []
lowercase__ : List[Any] = []
else:
lowercase__ : Dict = line.split(''' ''' )
words.append(splits[0] )
if len(_snake_case ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' ,'''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" ,words=_snake_case ,labels=_snake_case ) )
return examples
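    # A minimal sketch of the expected input, assuming the standard space-separated
    # CoNLL format where the label sits in the column selected by self.label_idx:
    #   EU NNP B-NP B-ORG
    #   rejects VBZ B-VP O
    #   (a blank line separates sentences)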
def UpperCAmelCase ( self : str ,_snake_case : TextIO ,_snake_case : TextIO ,_snake_case : List ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(_snake_case )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowercase__ : Union[str, Any] = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(_snake_case )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' ,line.split()[0] )
def UpperCAmelCase ( self : Tuple ,_snake_case : str ) -> List[str]:
"""simple docstring"""
if path:
with open(_snake_case ,'''r''' ) as f:
lowercase__ : str = f.read().splitlines()
if "O" not in labels:
lowercase__ : List[str] = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __A ( A_ ):
'''simple docstring'''
def __init__( self : str ) -> Any:
"""simple docstring"""
super().__init__(label_idx=-2 )
def UpperCAmelCase ( self : str ,_snake_case : str ) -> List[str]:
"""simple docstring"""
if path:
with open(_snake_case ,'''r''' ) as f:
lowercase__ : int = f.read().splitlines()
if "O" not in labels:
lowercase__ : List[str] = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __A ( A_ ):
'''simple docstring'''
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[str] = mode.value
lowercase__ : str = os.path.join(_snake_case ,f"""{mode}.txt""" )
lowercase__ : Union[str, Any] = 1
lowercase__ : Tuple = []
with open(_snake_case ,encoding='''utf-8''' ) as f:
for sentence in parse_incr(_snake_case ):
lowercase__ : Tuple = []
lowercase__ : Any = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(_snake_case ) == len(_snake_case )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" ,words=_snake_case ,labels=_snake_case ) )
guid_index += 1
return examples
def UpperCAmelCase ( self : List[Any] ,_snake_case : TextIO ,_snake_case : TextIO ,_snake_case : List ) -> str:
"""simple docstring"""
lowercase__ : Dict = 0
for sentence in parse_incr(_snake_case ):
lowercase__ : Any = preds_list[example_id]
lowercase__ : Union[str, Any] = ''''''
for token in sentence:
out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(_snake_case )
example_id += 1
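    # Example of the format written above: a sentence with (form, upos) pairs
    # [("The", "DET"), ("cat", "NOUN")] and predictions ["DET", "NOUN"] is emitted as
    # "The (DET|DET) cat (NOUN|NOUN) \n", i.e. each token followed by "(gold|pred) ".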
def UpperCAmelCase ( self : List[str] ,_snake_case : str ) -> List[str]:
"""simple docstring"""
if path:
with open(_snake_case ,'''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 16 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = {}
if "candidate_labels" in kwargs:
lowercase__ : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowercase__ : Optional[Any] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = load_image(_snake_case )
lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework )
lowercase__ : str = candidate_labels
lowercase__ : Dict = [hypothesis_template.format(_snake_case ) for x in candidate_labels]
lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case )
lowercase__ : Optional[int] = [text_inputs]
return inputs
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' )
lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] ,_snake_case ):
lowercase__ : List[str] = text_inputs[0]
else:
# Batching case.
lowercase__ : int = text_inputs[0][0]
lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case )
lowercase__ : Union[str, Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Dict = model_outputs.pop('''candidate_labels''' )
lowercase__ : Optional[Any] = model_outputs['''logits'''][0]
if self.framework == "pt":
lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__ : Tuple = probs.tolist()
if not isinstance(_snake_case ,_snake_case ):
lowercase__ : Any = [scores]
elif self.framework == "tf":
lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 )
lowercase__ : Optional[Any] = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
lowercase__ : Union[str, Any] = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda x : -x[0] )
]
return result
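# A minimal usage sketch for this pipeline (the checkpoint name is illustrative,
# assuming any CLIP-style zero-shot model):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}] sorted by score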
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
if isinstance(__lowerCamelCase , collections.abc.Iterable ):
return x
return (x, x)
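# e.g. the helper above maps 3 -> (3, 3) and leaves (224, 224) unchanged,
# mirroring timm's to_2tuple for image/patch sizes.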
@require_tf
class __A :
'''simple docstring'''
def UpperCAmelCase ( self : Any ,_snake_case : Optional[Any] ,_snake_case : Tuple ) -> Dict:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : Optional[Any] ,_snake_case : Dict ,_snake_case : Optional[Any]=None ,**_snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case ,_snake_case )
lowercase__ : Tuple = TFVisionTextDualEncoderModel(_snake_case )
lowercase__ : Union[str, Any] = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim) )
def UpperCAmelCase ( self : int ,_snake_case : Any ,_snake_case : Any ,_snake_case : Any ,_snake_case : int ,_snake_case : List[str]=None ,**_snake_case : str ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.get_vision_text_model(_snake_case ,_snake_case )
lowercase__ : str = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case )
lowercase__ : Dict = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : int ,_snake_case : Tuple ,_snake_case : Optional[Any] ,_snake_case : List[Any] ,_snake_case : str=None ,**_snake_case : str ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.get_vision_text_model(_snake_case ,_snake_case )
lowercase__ : str = {'''vision_model''': vision_model, '''text_model''': text_model}
lowercase__ : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case )
lowercase__ : Tuple = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Tuple ,_snake_case : Any ,_snake_case : List[str] ,_snake_case : List[Any] ,_snake_case : Any=None ,**_snake_case : List[str] ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ : int = self.get_vision_text_model(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case )
lowercase__ : List[Any] = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case )
lowercase__ : Tuple = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
lowercase__ : List[str] = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
lowercase__ : Any = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case )
lowercase__ : Dict = after_output[0].numpy()
        lowercase__ : Optional[int] = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(_snake_case ,1e-5 )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ,_snake_case : str ,_snake_case : str ,_snake_case : Optional[Any] ,_snake_case : Optional[int]=None ,**_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.get_vision_text_model(_snake_case ,_snake_case )
lowercase__ : Dict = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case )
lowercase__ : Optional[Any] = model(
input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ,output_attentions=_snake_case )
lowercase__ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ : Tuple = to_atuple(vision_model.config.image_size )
lowercase__ : int = to_atuple(vision_model.config.patch_size )
lowercase__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase__ : Any = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
lowercase__ : List[Any] = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : np.ndarray ,_snake_case : np.ndarray ,_snake_case : float ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = np.abs((a - b) ).max()
self.assertLessEqual(_snake_case ,_snake_case ,f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_snake_case )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = self.prepare_config_and_inputs()
self.check_save_load(**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[Any] = self.get_pretrained_model_and_inputs()
lowercase__ : str = model_a(**_snake_case )
lowercase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_snake_case )
lowercase__ : Dict = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
lowercase__ : Tuple = model_a(**_snake_case )
lowercase__ : Optional[int] = after_outputs[0].numpy()
        lowercase__ : List[str] = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(_snake_case ,1e-5 )
@require_tf
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' ,'''hf-internal-testing/tiny-random-bert''' )
lowercase__ : Any = 13
lowercase__ : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowercase__ : Union[str, Any] = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
lowercase__ : int = random_attention_mask([batch_size, 4] )
lowercase__ : int = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = TFViTModel(_snake_case ,name='''vision_model''' )
lowercase__ : Optional[int] = TFBertModel(_snake_case ,name='''text_model''' )
return vision_model, text_model
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFViTModelTester(self )
lowercase__ : List[str] = TFBertModelTester(self )
lowercase__ : Optional[Any] = vit_model_tester.prepare_config_and_inputs()
lowercase__ : str = bert_model_tester.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = vision_config_and_inputs
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' ,'''hf-internal-testing/tiny-random-roberta''' )
lowercase__ : Optional[Any] = 13
lowercase__ : Union[str, Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowercase__ : Tuple = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
lowercase__ : Tuple = random_attention_mask([batch_size, 4] )
lowercase__ : str = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Any ,_snake_case : Optional[int] ,_snake_case : Union[str, Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,**_snake_case : str ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ : List[Any] = self.get_vision_text_model(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case )
lowercase__ : str = model(
input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ,output_attentions=_snake_case )
lowercase__ : int = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) ,vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase__ : Union[str, Any] = to_atuple(vision_model.config.image_size )
lowercase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowercase__ : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase__ : Dict = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
lowercase__ : Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
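        # Numeric illustration (hypothetical sizes, not necessarily the tester
        # defaults): with a 30x30 image and 2x2 patches,
        # (30 // 2) * (30 // 2) = 225 patches, so the DeiT attention maps
        # checked above have shape (num_heads, 227, 227) once the [CLS] and
        # distillation tokens are prepended.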
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : List[Any] ) -> int:
"""simple docstring"""
lowercase__ : Optional[Any] = TFDeiTModel(_snake_case ,name='''vision_model''' )
lowercase__ : int = TFRobertaModel(_snake_case ,name='''text_model''' )
return vision_model, text_model
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
lowercase__ : Tuple = TFDeiTModelTester(self )
lowercase__ : Optional[Any] = TFRobertaModelTester(self )
lowercase__ : List[str] = vit_model_tester.prepare_config_and_inputs()
lowercase__ : Dict = bert_model_tester.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : int = vision_config_and_inputs
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' ,'''hf-internal-testing/tiny-random-bert''' )
lowercase__ : List[str] = 13
lowercase__ : List[str] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowercase__ : Any = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
lowercase__ : Dict = random_attention_mask([batch_size, 4] )
lowercase__ : Dict = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase ( self : Any ,_snake_case : List[str] ,_snake_case : int ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = TFCLIPVisionModel(_snake_case ,name='''vision_model''' )
lowercase__ : Optional[int] = TFBertModel(_snake_case ,name='''text_model''' )
return vision_model, text_model
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = TFCLIPVisionModelTester(self )
lowercase__ : List[Any] = TFBertModelTester(self )
lowercase__ : List[Any] = clip_model_tester.prepare_config_and_inputs()
lowercase__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
lowercase__ , lowercase__ : Tuple = vision_config_and_inputs
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' ,logit_scale_init_value=1.0 ,from_pt=_snake_case )
lowercase__ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
lowercase__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Any = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] ,images=_snake_case ,padding=_snake_case ,return_tensors='''np''' )
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
lowercase__ : List[Any] = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() ,_snake_case ,atol=1e-3 ) )
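        # Shape logic restated: with 1 image and 2 texts, logits_per_image is
        # (1, 2) and logits_per_text is its transpose (2, 1), which is exactly
        # what the two assertEqual calls above pin down; the expected values
        # then check the actual similarity scores to 1e-3.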
| 16 |
"""simple docstring"""
def _print_dist(dist, v) -> None:
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('''inf'''):
                print(int(dist[i][j]), end='''\t''')
            else:
                print('''INF''', end='''\t''')
        print()
def floyd_warshall(graph, v):
    dist = [[float('''inf''') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('''inf''')
                    and dist[k][j] != float('''inf''')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
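# Worked example (kept as a comment so the interactive block below still
# drives the script; values chosen for illustration):
#
# _demo = [
#     [0.0, 3.0, float("inf")],
#     [float("inf"), 0.0, 4.0],
#     [float("inf"), float("inf"), 0.0],
# ]
# dist, _ = floyd_warshall(_demo, 3)  # dist[0][2] == 7.0 via vertex 1 (3 + 4)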
if __name__ == "__main__":
lowerCAmelCase_ = int(input('Enter number of vertices: '))
lowerCAmelCase_ = int(input('Enter number of edges: '))
lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase_ = int(input('Enter source:'))
lowerCAmelCase_ = int(input('Enter destination:'))
lowerCAmelCase_ = float(input('Enter weight:'))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
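# Behavioural note on the lazy pattern above (a sketch of the intent, not the
# exact transformers internals): the entry in sys.modules becomes a proxy
# module, so importing this package stays cheap and the heavy torch-backed
# import only runs when a symbol such as GraphormerModel is first accessed.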
| 16 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None:
"""simple docstring"""
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
| 16 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowerCAmelCase_ = random.Random()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=1.0 , __lowerCamelCase=None , __lowerCamelCase=None ) -> int:
if rng is None:
lowercase__ : List[Any] = global_rng
lowercase__ : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __A ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : Dict ,_snake_case : Optional[Any]=7 ,_snake_case : int=400 ,_snake_case : Dict=2_000 ,_snake_case : Dict=24 ,_snake_case : Any=24 ,_snake_case : str=0.0 ,_snake_case : Optional[Any]=16_000 ,_snake_case : List[str]=True ,_snake_case : Dict=True ,) -> Dict:
"""simple docstring"""
lowercase__ : str = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Optional[Any] = min_seq_length
lowercase__ : int = max_seq_length
lowercase__ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase__ : Tuple = feature_size
lowercase__ : str = num_mel_bins
lowercase__ : List[Any] = padding_value
lowercase__ : Tuple = sampling_rate
lowercase__ : List[Any] = return_attention_mask
lowercase__ : Dict = do_normalize
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase ( self : Any ,_snake_case : Any=False ,_snake_case : int=False ) -> Tuple:
"""simple docstring"""
def _flatten(_snake_case : Optional[Any] ):
return list(itertools.chain(*_snake_case ) )
if equal_length:
lowercase__ : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase__ : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
lowercase__ : Optional[Any] = [np.asarray(_snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Tuple = SpeechaTextFeatureExtractor if is_speech_available() else None
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
lowercase__ : Any = SpeechaTextFeatureExtractionTester(self )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(_snake_case ,axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_snake_case ,axis=0 ) - 1 ) < 1e-3 ) )
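    # For reference, the property asserted above is utterance-level CMVN; a
    # minimal sketch of the transform that produces it (assuming a numpy array
    # of shape (num_frames, feature_size); the epsilon is illustrative):
    #
    # def _utterance_cmvn(features):
    #     mean = features.mean(axis=0)
    #     std = np.sqrt(features.var(axis=0) + 1e-10)
    #     return (features - mean) / std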
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
lowercase__ : Any = [np.asarray(_snake_case ) for speech_input in speech_inputs]
# Test feature size
lowercase__ : List[str] = feature_extractor(_snake_case ,padding=_snake_case ,return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowercase__ : Dict = feature_extractor(speech_inputs[0] ,return_tensors='''np''' ).input_features
lowercase__ : str = feature_extractor(np_speech_inputs[0] ,return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(_snake_case ,_snake_case ,atol=1e-3 ) )
# Test batched
lowercase__ : Optional[int] = feature_extractor(_snake_case ,return_tensors='''np''' ).input_features
lowercase__ : Union[str, Any] = feature_extractor(_snake_case ,return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_snake_case ,_snake_case ):
self.assertTrue(np.allclose(_snake_case ,_snake_case ,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowercase__ : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ : Dict = np.asarray(_snake_case )
lowercase__ : str = feature_extractor(_snake_case ,return_tensors='''np''' ).input_features
lowercase__ : Tuple = feature_extractor(_snake_case ,return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_snake_case ,_snake_case ):
self.assertTrue(np.allclose(_snake_case ,_snake_case ,atol=1e-3 ) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : str = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
lowercase__ : str = ['''longest''', '''max_length''', '''do_not_pad''']
lowercase__ : Union[str, Any] = [None, 16, None]
for max_length, padding in zip(_snake_case ,_snake_case ):
lowercase__ : Union[str, Any] = feature_extractor(
_snake_case ,padding=_snake_case ,max_length=_snake_case ,return_attention_mask=_snake_case )
lowercase__ : Any = inputs.input_features
lowercase__ : Any = inputs.attention_mask
lowercase__ : Optional[Any] = [np.sum(_snake_case ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
lowercase__ : List[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
lowercase__ : Optional[int] = [None, 16, None]
for max_length, padding in zip(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = feature_extractor(
_snake_case ,max_length=_snake_case ,padding=_snake_case ,return_tensors='''np''' ,return_attention_mask=_snake_case )
lowercase__ : Any = inputs.input_features
lowercase__ : Any = inputs.attention_mask
lowercase__ : Tuple = [np.sum(_snake_case ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Any = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
lowercase__ : List[Any] = feature_extractor(
_snake_case ,padding='''max_length''' ,max_length=4 ,truncation=_snake_case ,return_tensors='''np''' ,return_attention_mask=_snake_case ,)
lowercase__ : Optional[int] = inputs.input_features
lowercase__ : Optional[Any] = inputs.attention_mask
lowercase__ : str = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : int = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
lowercase__ : str = feature_extractor(
_snake_case ,padding='''longest''' ,max_length=4 ,truncation=_snake_case ,return_tensors='''np''' ,return_attention_mask=_snake_case ,)
lowercase__ : Optional[Any] = inputs.input_features
lowercase__ : List[str] = inputs.attention_mask
lowercase__ : int = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape ,(3, 4, 24) )
lowercase__ : Dict = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
lowercase__ : List[Any] = feature_extractor(
_snake_case ,padding='''longest''' ,max_length=16 ,truncation=_snake_case ,return_tensors='''np''' ,return_attention_mask=_snake_case ,)
lowercase__ : List[str] = inputs.input_features
lowercase__ : List[Any] = inputs.attention_mask
lowercase__ : Dict = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape ,(3, 6, 24) )
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
import torch
lowercase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : int = np.random.rand(100 ,32 ).astype(np.floataa )
lowercase__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase__ : Tuple = feature_extractor.pad([{'''input_features''': inputs}] ,return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowercase__ : Union[str, Any] = feature_extractor.pad([{'''input_features''': inputs}] ,return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase ( self : Any ,_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
lowercase__ : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' ,'''clean''' ,split='''validation''' )
# automatic decoding with librispeech
lowercase__ : int = ds.sort('''id''' ).select(range(_snake_case ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
        # fmt: off
        lowercase__ : str = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
lowercase__ : List[Any] = self._load_datasamples(1 )
lowercase__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Optional[Any] = feature_extractor(_snake_case ,return_tensors='''pt''' ).input_features
self.assertEquals(input_features.shape ,(1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] ,_snake_case ,atol=1e-4 ) )
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 16 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
lowercase__ : Tuple = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
lowercase__ : Optional[Any] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowercase__ : Tuple = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=__lowerCamelCase , output_all_encodings=__lowerCamelCase , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , __lowerCamelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowercase__ : List[str] = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
lowercase__ : Union[str, Any] = os.path.join(get_home_dir() , '''models''' )
lowercase__ : List[str] = _load_vocab(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , cls=__lowerCamelCase )
lowercase__ : str = nlp.model.BERTModel(
__lowerCamelCase , len(__lowerCamelCase ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=__lowerCamelCase , use_token_type_embed=__lowerCamelCase , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=__lowerCamelCase , use_decoder=__lowerCamelCase , )
original_bort.load_parameters(__lowerCamelCase , cast_dtype=__lowerCamelCase , ignore_extra=__lowerCamelCase )
lowercase__ : List[Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowercase__ : Optional[int] = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.0_2,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(__lowerCamelCase ),
}
lowercase__ : str = BertConfig.from_dict(__lowerCamelCase )
lowercase__ : str = BertForMaskedLM(__lowerCamelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__lowerCamelCase ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__lowerCamelCase , __lowerCamelCase ):
lowercase__ : str = hf_param.shape
lowercase__ : int = to_torch(params[gluon_param] )
lowercase__ : Optional[Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
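    # Illustrative failure mode the assert above catches: if a Gluon tensor
    # were stored transposed, e.g. (1024, 768) against an HF slot expecting
    # (768, 1024), the conversion stops here instead of producing a silently
    # wrong checkpoint (shapes are hypothetical).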
lowercase__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
lowercase__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
lowercase__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
lowercase__ : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowercase__ : int = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowercase__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
lowercase__ : BertSelfAttention = layer.attention.self
lowercase__ : Any = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
lowercase__ : int = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
lowercase__ : Optional[int] = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
lowercase__ : int = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
lowercase__ : Optional[int] = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
lowercase__ : Optional[Any] = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
lowercase__ : BertSelfOutput = layer.attention.output
lowercase__ : Union[str, Any] = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
lowercase__ : List[str] = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
lowercase__ : List[str] = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
lowercase__ : Any = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
lowercase__ : BertIntermediate = layer.intermediate
lowercase__ : Dict = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
lowercase__ : Union[str, Any] = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
lowercase__ : BertOutput = layer.output
lowercase__ : str = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
lowercase__ : Any = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
lowercase__ : int = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
lowercase__ : Any = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowercase__ : str = RobertaTokenizer.from_pretrained('''roberta-base''' )
lowercase__ : Tuple = tokenizer.encode_plus(__lowerCamelCase )['''input_ids''']
# Get gluon output
lowercase__ : List[str] = mx.nd.array([input_ids] )
lowercase__ : Union[str, Any] = original_bort(inputs=__lowerCamelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(__lowerCamelCase )
lowercase__ : List[str] = BertModel.from_pretrained(__lowerCamelCase )
hf_bort_model.eval()
lowercase__ : Tuple = tokenizer.encode_plus(__lowerCamelCase , return_tensors='''pt''' )
lowercase__ : Dict = hf_bort_model(**__lowerCamelCase )[0]
lowercase__ : List[str] = output_gluon[0].asnumpy()
lowercase__ : Optional[Any] = output_hf[0].detach().numpy()
lowercase__ : List[str] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowercase__ : List[str] = np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
if success:
        print('''✔️ Both models output the same tensors''')
    else:
        print('''❌ Both models do **NOT** output the same tensors''')
print('''Absolute difference is:''' , __lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 16 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
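    # The cross-framework loading exercised throughout this class reduces to
    # the following round trip (a condensed sketch using the public API; the
    # local path is illustrative):
    #
    # pt_model = BertModel.from_pretrained("bert-base-uncased")
    # pt_model.save_pretrained("/tmp/bert-pt")
    # tf_model = TFBertModel.from_pretrained("/tmp/bert-pt", from_pt=True)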
| 16 | 1 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowerCAmelCase_ = TypeVar('T')
lowerCAmelCase_ = Union[List[T], Tuple[T, ...]]
lowerCAmelCase_ = Union[T, List[T], Dict[str, T]]
lowerCAmelCase_ = Union[str, bytes, os.PathLike]
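# Illustrative values for the aliases above: the list/tuple alias admits
# [1, 2] or (1, 2); the nested alias additionally admits a bare 3 or
# {"train": 3}; the path alias admits "data/train.csv", b"data/train.csv"
# or any os.PathLike such as pathlib.Path("data/train.csv").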
| 16 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
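# Sanity check (hand-counted, matching Project Euler 116): for a row of
# length 5 the colour-wise counts are 7 red, 3 green and 2 blue arrangements,
# so solution(5) should return 7 + 3 + 2 = 12.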
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : Optional[str] = field(
default="tab_fact" ,metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowerCAmelCase : Optional[str] = field(
default="tab_fact" ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ,)
lowerCAmelCase : int = field(
default=1_0_2_4 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCAmelCase : bool = field(
default=A_ ,metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} ,)
lowerCAmelCase : Optional[int] = field(
default=A_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
lowerCAmelCase : Optional[int] = field(
default=A_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
lowerCAmelCase : Optional[int] = field(
default=A_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} ,)
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "A csv or a json file containing the training data."} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "A csv or a json file containing the validation data."} )
lowerCAmelCase : Optional[str] = field(default=A_ ,metadata={"help": "A csv or a json file containing the test data."} )
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowercase__ : List[str] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowercase__ : Optional[int] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
default=A_ ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} ,)
lowerCAmelCase : str = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
def __UpperCAmelCase ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowercase__ : Tuple = training_args.get_process_log_level()
logger.setLevel(__lowerCamelCase )
datasets.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase__ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowercase__ : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowercase__ : str = data_args.train_file.split('''.''' )[-1]
lowercase__ : Tuple = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowercase__ : Dict = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowercase__ : Union[str, Any] = load_dataset('''csv''' , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowercase__ : Optional[Any] = load_dataset('''json''' , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowercase__ : int = raw_datasets['''train'''].features['''label'''].names
lowercase__ : List[Any] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowercase__ : List[Any] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__lowerCamelCase , )
lowercase__ : Any = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowercase__ : str = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowercase__ : List[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowercase__ : Any = {'''Refused''': 0, '''Entailed''': 1}
lowercase__ : str = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowercase__ : str = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__lowerCamelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__lowerCamelCase ):
lowercase__ : Dict = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowercase__ : List[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowercase__ : Tuple = examples['''statement''']
lowercase__ : str = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowercase__ : Dict = tokenizer(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase )
lowercase__ : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowercase__ : List[Any] = raw_datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowercase__ : str = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowercase__ : Union[str, Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowercase__ : Any = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowercase__ : Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowercase__ : Optional[Any] = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowercase__ : str = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__lowerCamelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__lowerCamelCase ):
lowercase__ : Union[str, Any] = p.predictions[0] if isinstance(p.predictions , __lowerCamelCase ) else p.predictions
lowercase__ : Dict = np.argmax(__lowerCamelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase__ : List[str] = default_data_collator
elif training_args.fpaa:
lowercase__ : Any = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 )
else:
lowercase__ : List[Any] = None
# Initialize our Trainer
lowercase__ : Union[str, Any] = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCamelCase , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
lowercase__ : Dict = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : int = last_checkpoint
lowercase__ : List[str] = trainer.train(resume_from_checkpoint=__lowerCamelCase )
lowercase__ : List[str] = train_result.metrics
lowercase__ : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCamelCase )
)
lowercase__ : Any = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , __lowerCamelCase )
trainer.save_metrics('''train''' , __lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ : Union[str, Any] = trainer.evaluate(eval_dataset=__lowerCamelCase )
lowercase__ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCamelCase )
lowercase__ : Tuple = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.log_metrics('''eval''' , __lowerCamelCase )
trainer.save_metrics('''eval''' , __lowerCamelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
lowercase__ : Tuple = predict_dataset.remove_columns('''label''' )
lowercase__ : str = trainer.predict(__lowerCamelCase , metric_key_prefix='''predict''' ).predictions
lowercase__ : Tuple = np.argmax(__lowerCamelCase , axis=1 )
lowercase__ : List[Any] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(__lowerCamelCase ):
lowercase__ : Optional[Any] = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
lowercase__ : Dict = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCamelCase )
else:
trainer.create_model_card(**__lowerCamelCase )
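# A minimal sketch (not part of the original script; the sample string is made
# up) of the flat TabFact table format parsed by `_convert_table_text_to_pandas`
# above: rows are newline-separated, cells are '#'-separated, and the first row
# carries the column names.
def _demo_convert_table_text():
    import pandas as pd

    sample = '''year#city\n2020#Tokyo\n2016#Rio'''
    content = [row.split('''#''' ) for row in sample.strip('''\n''' ).split('''\n''' )]
    return pd.DataFrame.from_records(content[1:] , columns=content[0] )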
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 16 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
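# Note: `debug_launcher` runs the wrapped function under accelerate's
# lightweight multi-process CPU launcher, so both tests above exercise the
# distributed code path without requiring GPUs.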
| 16 | 1 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = ["image_processor", "tokenizer"]
lowerCAmelCase : Dict = "OwlViTImageProcessor"
lowerCAmelCase : Optional[int] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : int ,_snake_case : Dict=None ,_snake_case : str=None ,**_snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : int = kwargs.pop('''feature_extractor''' )
lowercase__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
def __call__( self : Tuple ,_snake_case : int=None ,_snake_case : Optional[int]=None ,_snake_case : Optional[Any]=None ,_snake_case : Any="max_length" ,_snake_case : Optional[Any]="np" ,**_snake_case : List[Any] ) -> Any:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(_snake_case ,_snake_case ) or (isinstance(_snake_case ,_snake_case ) and not isinstance(text[0] ,_snake_case )):
lowercase__ : List[Any] = [self.tokenizer(_snake_case ,padding=_snake_case ,return_tensors=_snake_case ,**_snake_case )]
elif isinstance(_snake_case ,_snake_case ) and isinstance(text[0] ,_snake_case ):
lowercase__ : int = []
# Maximum number of queries across batch
lowercase__ : int = max([len(_snake_case ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_snake_case ) != max_num_queries:
lowercase__ : Dict = t + [''' '''] * (max_num_queries - len(_snake_case ))
lowercase__ : Tuple = self.tokenizer(_snake_case ,padding=_snake_case ,return_tensors=_snake_case ,**_snake_case )
encodings.append(_snake_case )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
lowercase__ : Tuple = np.concatenate([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
lowercase__ : Tuple = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowercase__ : Union[str, Any] = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
lowercase__ : Optional[Any] = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowercase__ : Dict = torch.cat([encoding['''input_ids'''] for encoding in encodings] ,dim=0 )
lowercase__ : Optional[Any] = torch.cat([encoding['''attention_mask'''] for encoding in encodings] ,dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowercase__ : Dict = tf.stack([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
lowercase__ : Optional[Any] = tf.stack([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
lowercase__ : Dict = BatchEncoding()
lowercase__ : Tuple = input_ids
lowercase__ : Any = attention_mask
if query_images is not None:
lowercase__ : Tuple = BatchEncoding()
lowercase__ : Dict = self.image_processor(
_snake_case ,return_tensors=_snake_case ,**_snake_case ).pixel_values
lowercase__ : Dict = query_pixel_values
if images is not None:
lowercase__ : Union[str, Any] = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowercase__ : Any = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Tuple ) -> Tuple:
"""simple docstring"""
return self.image_processor.post_process(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : List[Any] ,*_snake_case : Optional[Any] ,**_snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,*_snake_case : Optional[int] ,**_snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : List[Any] ,*_snake_case : Dict ,**_snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[int] ,*_snake_case : Dict ,**_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_snake_case ,)
return self.image_processor_class
@property
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,_snake_case ,)
return self.image_processor
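# A minimal sketch (not from the original file) of the query-padding rule in
# `__call__` above: each sample's list of text queries is padded with
# single-space strings up to the longest list in the batch.
def _demo_pad_queries(text):
    max_num_queries = max(len(t ) for t in text )
    return [t + [''' '''] * (max_num_queries - len(t )) for t in text]
# _demo_pad_queries([['''a cat'''], ['''a dog''', '''a bird''']]) returns
# [['''a cat''', ''' '''], ['''a dog''', '''a bird''']]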
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
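# `_LazyModule` defers the torch/sentencepiece imports declared above until an
# attribute is first accessed, which keeps importing the parent package cheap.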
| 16 | 1 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __UpperCAmelCase ( ) -> None:
print('''Making key files...''' )
make_key_files('''rsa''' , 10_24 )
print('''Key files generation successful.''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
print('''Generating prime p...''' )
lowercase__ : Optional[Any] = rabinMiller.generate_large_prime(__lowerCamelCase )
print('''Generating prime q...''' )
lowercase__ : Tuple = rabinMiller.generate_large_prime(__lowerCamelCase )
lowercase__ : str = p * q
print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' )
while True:
lowercase__ : Optional[int] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(__lowerCamelCase , (p - 1) * (q - 1) ) == 1:
break
print('''Calculating d that is mod inverse of e...''' )
lowercase__ : List[Any] = cryptoMath.find_mod_inverse(__lowerCamelCase , (p - 1) * (q - 1) )
lowercase__ : str = (n, e)
lowercase__ : int = (n, d)
return (public_key, private_key)
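# A quick sanity sketch (illustrative textbook primes, not from the original
# file) of the invariant enforced by `generate_key` above:
# e * d == 1 (mod (p - 1) * (q - 1)).
def _demo_rsa_relation() -> bool:
    p, q, e = 61, 53, 17
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    return (e * d) % ((p - 1) * (q - 1)) == 1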
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print('''\nWARNING:''' )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
lowercase__ , lowercase__ : Union[str, Any] = generate_key(__lowerCamelCase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , '''w''' ) as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , '''w''' ) as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 16 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["torch", "torchsde"]
def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
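# These dummies let the parent library import successfully when torch/torchsde
# are missing; any attempt to actually use the class raises a clear error via
# `requires_backends` instead of an opaque ImportError.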
| 16 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ : List[str] = _modexpt(__lowerCamelCase , exponent // 2 , __lowerCamelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__lowerCamelCase , exponent - 1 , __lowerCamelCase )) % modulo_value
def __UpperCAmelCase ( __lowerCamelCase = 17_77 , __lowerCamelCase = 18_55 , __lowerCamelCase = 8 ) -> int:
lowercase__ : Tuple = base
for _ in range(1 , __lowerCamelCase ):
lowercase__ : List[str] = _modexpt(__lowerCamelCase , __lowerCamelCase , 10**digits )
return result
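# Sanity check (illustrative values, not from the original file): the modular
# exponentiation helper above mirrors Python's built-in three-argument pow.
# The tetration loop in the solution then iterates base^result (mod 10**digits)
# `height - 1` times.
assert _modexpt(3 , 4 , 5 ) == pow(3 , 4 , 5 )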
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase_ = 4
lowerCAmelCase_ = 3
class __A ( A_ ):
'''simple docstring'''
pass
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
for shard in shards:
for i in range(__lowerCamelCase ):
yield {"i": i, "shard": shard}
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : int = int(os.environ['''RANK'''] )
lowercase__ : str = int(os.environ['''WORLD_SIZE'''] )
lowercase__ : List[Any] = ArgumentParser()
parser.add_argument('''--streaming''' , type=__lowerCamelCase )
parser.add_argument('''--local_rank''' , type=__lowerCamelCase )
parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 )
lowercase__ : int = parser.parse_args()
lowercase__ : Optional[Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]}
lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase )
if not streaming:
lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) )
lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase )
lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase )
lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : str = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
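    # e.g. with full_size=12 and world_size=5: every rank gets 12 // 5 = 2 items
    # and ranks 0-1 each get one extra, so local sizes are [3, 3, 2, 2, 2]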
lowercase__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "tapas"
def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,**_snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[Any] = intermediate_size
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : Dict = type_vocab_sizes
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase__ : Any = positive_label_weight
lowercase__ : int = num_aggregation_labels
lowercase__ : List[str] = aggregation_loss_weight
lowercase__ : Optional[int] = use_answer_as_supervision
lowercase__ : Optional[Any] = answer_loss_importance
lowercase__ : Union[str, Any] = use_normalized_answer_loss
lowercase__ : str = huber_loss_delta
lowercase__ : str = temperature
lowercase__ : int = aggregation_temperature
lowercase__ : List[Any] = use_gumbel_for_cells
lowercase__ : Tuple = use_gumbel_for_aggregation
lowercase__ : Union[str, Any] = average_approximation_function
lowercase__ : Union[str, Any] = cell_selection_preference
lowercase__ : Any = answer_loss_cutoff
lowercase__ : List[Any] = max_num_rows
lowercase__ : str = max_num_columns
lowercase__ : int = average_logits_per_cell
lowercase__ : str = select_one_column
lowercase__ : str = allow_empty_column_selection
lowercase__ : Any = init_cell_selection_weights_to_zero
lowercase__ : Optional[int] = reset_position_index_per_cell
lowercase__ : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
lowercase__ : Optional[Any] = aggregation_labels
lowercase__ : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels ,_snake_case ):
lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
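# A minimal sketch (hypothetical labels, not from the original file) of the
# normalization done at the end of `__init__` above: JSON round-trips turn the
# int keys of `aggregation_labels` into strings, and the dict comprehension
# converts them back.
_demo_labels = {'''0''': '''NONE''', '''1''': '''SUM''', '''2''': '''AVERAGE'''}
assert {int(k ): v for k, v in _demo_labels.items()}[0] == '''NONE'''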
| 16 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase_ = ['bert-base-uncased', 'bert-base-cased']
lowerCAmelCase_ = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class __A ( tf.keras.Model ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Tuple ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase__ : str = tokenizer
lowercase__ : Optional[int] = AutoConfig.from_pretrained(_snake_case )
lowercase__ : int = TFAutoModel.from_config(_snake_case )
def UpperCAmelCase ( self : Tuple ,_snake_case : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.tokenizer(_snake_case )
lowercase__ : Optional[int] = self.bert(**_snake_case )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
super().setUp()
lowercase__ : int = [
BertTokenizer.from_pretrained(_snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
lowercase__ : Dict = [TFBertTokenizer.from_pretrained(_snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(_snake_case ,use_fast_bert_tokenizer=_snake_case )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowercase__ : int = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowercase__ : Dict = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
lowercase__ : Optional[Any] = tokenizer(_snake_case ,return_tensors='''tf''' ,padding='''longest''' )
lowercase__ : Union[str, Any] = tf_tokenizer(_snake_case )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] ,tf.intaa ) == tf_outputs[key] ) )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
lowercase__ : Optional[int] = tf_tokenizer(self.paired_sentences )
lowercase__ : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] ,text_pair=[sentence[1] for sentence in self.paired_sentences] ,)
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] ,tf.intaa ) == separated_outputs[key] ) )
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
lowercase__ : Tuple = tf.function(_snake_case )
for test_inputs in (self.test_sentences, self.paired_sentences):
lowercase__ : int = tf.constant(_snake_case )
lowercase__ : List[Any] = compiled_tokenizer(_snake_case )
lowercase__ : Dict = tf_tokenizer(_snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
lowercase__ : Tuple = ModelToSave(tokenizer=_snake_case )
lowercase__ : str = tf.convert_to_tensor(self.test_sentences )
lowercase__ : Optional[Any] = model(_snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowercase__ : List[Any] = Path(_snake_case ) / '''saved.model'''
model.save(_snake_case )
lowercase__ : Union[str, Any] = tf.keras.models.load_model(_snake_case )
lowercase__ : Dict = loaded_model(_snake_case )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) ,1e-5 )
| 16 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape
lowercase__ : int = (
reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
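# Shape sanity note (derived from the tester defaults above): with
# image_size=32, patch_size=2, depths=[1, 2, 1] and embed_dim=16, the model
# test expects seq_len = (32 // 2) ** 2 // 4 ** 2 = 16 and a final hidden
# dimension of 16 * 2 ** 2 = 64.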
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict:
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]:
"""simple docstring"""
if NLTK_VERSION >= version.Version('''3.6.5''' ):
lowercase__ : int = [
meteor_score.single_meteor_score(
word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
else:
lowercase__ : Tuple = [
meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
return {"meteor": np.mean(_snake_case )}
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCAmelCase_ = {
'facebook/xglm-564M': 2_048,
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ) -> Optional[int]:
"""simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
@property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]:
"""simple docstring"""
        return self.sp_model.encode(_snake_case, out_type=str)
    def _convert_token_to_id(self, token) -> int:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index) -> str:
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        out_string = "".join(_snake_case).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
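# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The comment table in __init__ describes how fairseq ids are derived from
# sentencepiece ids: four reserved slots, then every real piece shifted by a
# fixed offset of 1. The helper below is a hypothetical, minimal restatement
# of that mapping for a toy vocabulary; "demo_spm_to_fairseq_id" is a made-up
# name used only for this demo.
def demo_spm_to_fairseq_id(spm_id: int, fairseq_offset: int = 1, unk_id: int = 3) -> int:
    """Map a sentencepiece id to a fairseq-aligned id; spm id 0 means <unk>."""
    return spm_id + fairseq_offset if spm_id else unk_id


# e.g. spm id 3 (',') lands on fairseq id 4, matching the table above,
# and an unknown piece (spm id 0) falls back to the '<unk>' slot.
assert demo_spm_to_fairseq_id(3) == 4
assert demo_spm_to_fairseq_id(0) == 3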
| 16 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False)
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=1_60_00, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase_ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
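# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The conversion above walks dotted state-dict keys with getattr to reach the
# target parameter before copying the tensor in. Below is a minimal,
# self-contained version of that traversal on a toy torch module; the names
# "demo_set_by_path" and "toy" are made up for the demo.
import torch
from torch import nn


def demo_set_by_path(root: nn.Module, dotted_key: str, value: torch.Tensor) -> None:
    *path, leaf = dotted_key.split(".")
    obj = root
    for attribute in path:
        obj = getattr(obj, attribute)
    # Copy the tensor into the existing parameter, as set_recursively does.
    getattr(obj, leaf).data = value


toy = nn.Sequential(nn.Linear(2, 2))
demo_set_by_path(toy, "0.bias", torch.zeros(2))
assert torch.all(toy[0].bias == 0)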
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 1_92:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 2_56:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 3_84:
            from_model = timm.create_model("levit_384", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # The two state dicts list parameters in the same order, so keys are
        # paired purely by position.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 2_24, 2_24))
        out1 = from_model(x)
        out2 = our_model(x).logits
    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
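# --- Illustrative sketch (added for clarity; not part of the original script) ---
# convert_weight_and_push relies on the timm and HF state dicts listing
# parameters in the same order, so keys are paired purely by position. A
# standalone demo of that positional re-keying; the dict contents and the
# name "demo_rekey_by_position" are made up for illustration.
from collections import OrderedDict


def demo_rekey_by_position(src: "OrderedDict[str, object]", dst_keys: list) -> OrderedDict:
    assert len(src) == len(dst_keys), "positional pairing needs equal lengths"
    return OrderedDict(zip(dst_keys, src.values()))


src = OrderedDict([("patch_embed.proj.weight", 1), ("head.weight", 2)])
out = demo_rekey_by_position(src, ["levit.patch_embeddings.projection.weight", "classifier.weight"])
assert list(out) == ["levit.patch_embeddings.projection.weight", "classifier.weight"]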
| 16 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowerCAmelCase_ = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class Blip2VisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "blip_2_vision_model"
    def __init__(self, hidden_size=1_408, intermediate_size=6_144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.0_0001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
def UpperCAmelCase ( cls : List[str] ,_snake_case : Union[str, os.PathLike] ,**_snake_case : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case )
        config_dict, kwargs = cls.get_config_dict(_snake_case, **_snake_case)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "blip_2_qformer"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1_408, **kwargs) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
def UpperCAmelCase ( cls : Tuple ,_snake_case : Union[str, os.PathLike] ,**_snake_case : Union[str, Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case )
        config_dict, kwargs = cls.get_config_dict(_snake_case, **_snake_case)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    '''simple docstring'''
    model_type = "blip-2"
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: PretrainedConfig, **kwargs):
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
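# --- Usage sketch (added for clarity; not part of the original file) ---
# Blip2Config nests three sub-configs and re-serializes them in to_dict().
# Assuming a transformers release that ships the public Blip-2 classes, a
# typical composition round-trip looks like this (defaults everywhere; this
# is a usage sketch, not a test of the file above):
from transformers import Blip2Config as HFBlip2Config
from transformers import Blip2QFormerConfig as HFBlip2QFormerConfig
from transformers import Blip2VisionConfig as HFBlip2VisionConfig
from transformers import OPTConfig


def demo_blip2_roundtrip():
    config = HFBlip2Config.from_vision_qformer_text_configs(
        HFBlip2VisionConfig(), HFBlip2QFormerConfig(), OPTConfig()
    )
    d = config.to_dict()
    # Each nested config serializes back under its own key.
    assert d["vision_config"]["model_type"] == "blip_2_vision_model"
    return config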
| 16 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
'''simple docstring'''
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
def __call__( self : List[str] ) -> Any:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages:
'''simple docstring'''
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self) -> None:
        """simple docstring"""
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
def __call__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def flatten(self, translation_dict) -> dict:
        """simple docstring"""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"""Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).""")
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}
def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
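# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The flatten step above turns a per-example dict such as
# {"en": "the cat", "fr": ["le chat", "un chat"]} into two aligned,
# language-sorted lists. A standalone restatement of that logic; the name
# "demo_flatten_translations" is made up for this demo.
def demo_flatten_translations(translation_dict: dict) -> dict:
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}


out = demo_flatten_translations({"en": "the cat", "fr": ["le chat", "un chat"]})
assert out["language"] == ["en", "fr", "fr"]
assert out["translation"] == ["the cat", "le chat", "un chat"]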
| 16 | 1 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
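# --- Usage sketch (assumed invocation; not part of the original file) ---
# fire exposes the function's signature on the command line, so a call like
# the following would write a randomly initialized t5-small clone to disk:
#
#   python save_randomly_initialized.py t5-small ./t5-small-random --d_model=64
#
# (the script filename and the --d_model override are illustrative only; any
# keyword accepted by the model's config can be passed this way)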
| 16 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"])
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=1_00, num_training_steps=(len(train_dataloader) * num_epochs))
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
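# --- Illustrative sketch (added for clarity; not part of the original script) ---
# A deliberately simplified stand-in for accelerate's find_executable_batch_size:
# retry the wrapped function, halving the batch size whenever it raises an
# out-of-memory style error. The real decorator also clears CUDA caches and
# inspects the error more carefully; this demo and its names are made up.
import functools


def demo_find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(demo_find_executable_batch_size, starting_batch_size=starting_batch_size)

    @functools.wraps(function)
    def wrapper():
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size)
            except RuntimeError as err:  # assume OOM-style failures surface here
                if "out of memory" not in str(err):
                    raise
                batch_size //= 2
        raise RuntimeError("No executable batch size found, reached zero.")

    return wrapper


@demo_find_executable_batch_size(starting_batch_size=64)
def _demo_train(batch_size):
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory (simulated)")
    return batch_size


assert _demo_train() == 16  # 64 and 32 "OOM", 16 succeeds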
| 16 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
def __setstate__( self : List[str] ,_snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        """simple docstring"""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
def UpperCAmelCase ( self : int ,_snake_case : str ) -> List[str]:
"""simple docstring"""
        text = self.preprocess_text(_snake_case)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
def UpperCAmelCase ( self : List[str] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.PieceToId(_snake_case )
def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
return self.sp_model.IdToPiece(_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : int ) -> Tuple:
"""simple docstring"""
        out_string = "".join(_snake_case).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
def UpperCAmelCase ( self : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> Dict:
"""simple docstring"""
        text = super()._decode(*_snake_case, **_snake_case)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
return text
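# --- Illustrative sketch (added for clarity; not part of the original file) ---
# CPM encodes spaces and newlines as the sentinels \u2582 / \u2583 before the
# sentencepiece model sees the text, and _decode above reverses that mapping.
# The round trip in isolation (toy input, no tokenizer required):
translator = str.maketrans(" \n", "\u2582\u2583")

encoded = "你好 世界\n再见".translate(translator)
assert " " not in encoded and "\n" not in encoded

decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "你好 世界\n再见"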
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 10_00
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
# For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [1_92, 7_68, 10_24]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
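# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The helpers above build (hf_key, original_key) rename pairs; the conversion
# then just copies tensors across under the new names. A toy demonstration of
# applying such pairs; "demo_apply_rename_pairs" and the values are made up.
def demo_apply_rename_pairs(original_weights: dict, pairs: list) -> dict:
    return {hf_key: original_weights[og_key] for hf_key, og_key in pairs}


pairs = [("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
          "stage0.patch_embed.proj.weight")]
weights = {"stage0.patch_embed.proj.weight": "tensor-placeholder"}
assert demo_apply_rename_pairs(weights, pairs)[pairs[0][0]] == "tensor-placeholder"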
| 16 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: int = 0.9, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : Optional[float] = None ,_snake_case : PILImageResampling = PILImageResampling.BICUBIC ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Optional[Any] ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : Optional[int] = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
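        # With crop_pct set, resize to size / crop_pct so the subsequent center crop
        # brings the image back to the requested size (the usual "crop percentage" trick).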
if crop_pct is not None:
if "shortest_edge" in size:
lowercase__ : Dict = int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowercase__ : Tuple = int(size['''height'''] / crop_pct )
else:
lowercase__ : List[str] = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(_snake_case ) )
lowercase__ : List[Any] = get_resize_output_image_size(_snake_case ,size=_snake_case ,default_to_square=_snake_case )
else:
if "shortest_edge" in size:
lowercase__ : Tuple = get_resize_output_image_size(_snake_case ,size=size['''shortest_edge'''] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
lowercase__ : Optional[Any] = (size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(_snake_case ) )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : List[Any] ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : Union[str, Any] = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_snake_case ,size=(size['''height'''], size['''width''']) ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : np.ndarray ,_snake_case : Union[int, float] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : str ,) -> Union[str, Any]:
"""simple docstring"""
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : np.ndarray ,_snake_case : Union[float, List[float]] ,_snake_case : Union[float, List[float]] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Optional[Any] ,) -> np.ndarray:
"""simple docstring"""
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : ImageInput ,_snake_case : bool = None ,_snake_case : Dict[str, int] = None ,_snake_case : int = None ,_snake_case : PILImageResampling = None ,_snake_case : bool = None ,_snake_case : Dict[str, int] = None ,_snake_case : bool = None ,_snake_case : float = None ,_snake_case : bool = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[str, TensorType]] = None ,_snake_case : ChannelDimension = ChannelDimension.FIRST ,**_snake_case : List[Any] ,) -> PIL.Image.Image:
"""simple docstring"""
lowercase__ : List[Any] = do_resize if do_resize is not None else self.do_resize
lowercase__ : int = crop_pct if crop_pct is not None else self.crop_pct
lowercase__ : List[Any] = resample if resample is not None else self.resample
lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str] = image_std if image_std is not None else self.image_std
lowercase__ : Optional[Any] = size if size is not None else self.size
lowercase__ : Optional[Any] = get_size_dict(_snake_case ,default_to_square=_snake_case )
lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
lowercase__ : List[Any] = get_size_dict(_snake_case ,param_name='''crop_size''' )
lowercase__ : Dict = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase__ : int = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
lowercase__ : Optional[Any] = [self.resize(image=_snake_case ,size=_snake_case ,crop_pct=_snake_case ,resample=_snake_case ) for image in images]
if do_center_crop:
lowercase__ : List[Any] = [self.center_crop(image=_snake_case ,size=_snake_case ) for image in images]
if do_rescale:
lowercase__ : List[Any] = [self.rescale(image=_snake_case ,scale=_snake_case ) for image in images]
if do_normalize:
lowercase__ : Union[str, Any] = [self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case ) for image in images]
lowercase__ : List[Any] = [to_channel_dimension_format(_snake_case ,_snake_case ) for image in images]
lowercase__ : Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
| 16 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowercase__ : Tuple = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCamelCase )
# print(out)
number += 1
out += " "
return out
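# Quick sanity check (assuming the intended (number, iterations) arguments survive the renaming):
#     __UpperCAmelCase(1, 15) -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (every entry, FizzBuzz included, is followed by a single trailing space)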
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : int | None = None ) -> Any:
"""simple docstring"""
lowercase__ : int = value
lowercase__ : Any = random()
lowercase__ : Node | None = None
lowercase__ : Node | None = None
def __repr__( self : Tuple ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self : Dict ) -> str:
"""simple docstring"""
lowercase__ : Dict = str(self.value ) + ''' '''
lowercase__ : Dict = str(self.left or '''''' )
lowercase__ : List[str] = str(self.right or '''''' )
return value + left + right
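# A treap keeps BST order on `value` and heap order on the random `prior`, so the
# split/merge primitives below stay balanced in expectation (O(log n) per operation).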
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowercase__ , lowercase__ : Dict = split(root.left , __lowerCamelCase )
return left, root
else:
lowercase__ , lowercase__ : Optional[Any] = split(root.right , __lowerCamelCase )
return root, right
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowercase__ : Optional[int] = merge(left.right , __lowerCamelCase )
return left
else:
lowercase__ : str = merge(__lowerCamelCase , right.left )
return right
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Node | None:
lowercase__ : Union[str, Any] = Node(__lowerCamelCase )
lowercase__ , lowercase__ : Optional[Any] = split(__lowerCamelCase , __lowerCamelCase )
return merge(merge(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Node | None:
lowercase__ , lowercase__ : List[str] = split(__lowerCamelCase , value - 1 )
lowercase__ , lowercase__ : Tuple = split(__lowerCamelCase , __lowerCamelCase )
return merge(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase ) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Node | None:
for arg in args.split():
if arg[0] == "+":
lowercase__ : Any = insert(__lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
lowercase__ : List[Any] = erase(__lowerCamelCase , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def __UpperCAmelCase ( ) -> None:
lowercase__ : str = None
print(
'''enter numbers to create a tree, + value to add value into treap, '''
'''- value to erase all nodes with value. \'q\' to quit. ''' )
lowercase__ : Optional[Any] = input()
while args != "q":
lowercase__ : Union[str, Any] = interact_treap(__lowerCamelCase , __lowerCamelCase )
print(__lowerCamelCase )
lowercase__ : Optional[Any] = input()
    print('''goodbye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : Tuple = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Optional[Any] = len(_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
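            # hidden_states holds the embedding/stem output plus one feature map per stage,
            # hence the expected length of num_stages + 1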
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
| 16 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase_ = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
lowerCAmelCase_ = {
'ctrl': 256,
}
lowerCAmelCase_ = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]:
lowercase__ : Dict = set()
lowercase__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ : Dict = char
lowercase__ : Tuple = set(__lowerCamelCase )
return pairs
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[Any] = CONTROL_CODES
def __init__( self : int ,_snake_case : str ,_snake_case : Tuple ,_snake_case : List[str]="<unk>" ,**_snake_case : List[str] ) -> Tuple:
"""simple docstring"""
super().__init__(unk_token=_snake_case ,**_snake_case )
with open(_snake_case ,encoding='''utf-8''' ) as vocab_handle:
lowercase__ : Dict = json.load(_snake_case )
lowercase__ : str = {v: k for k, v in self.encoder.items()}
with open(_snake_case ,encoding='''utf-8''' ) as merges_handle:
lowercase__ : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1]
lowercase__ : List[Any] = [tuple(merge.split() ) for merge in merges]
lowercase__ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
lowercase__ : int = {}
@property
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return len(self.encoder )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase ( self : Any ,_snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ : str = tuple(_snake_case )
lowercase__ : Dict = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowercase__ : Any = get_pairs(_snake_case )
if not pairs:
return token
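        # Greedy BPE: repeatedly apply the lowest-ranked (earliest learned) merge
        # until none of the remaining adjacent pairs appears in self.bpe_ranks.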
while True:
lowercase__ : Dict = min(_snake_case ,key=lambda _snake_case : self.bpe_ranks.get(_snake_case ,float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ : Any = bigram
lowercase__ : Tuple = []
lowercase__ : Any = 0
while i < len(_snake_case ):
try:
lowercase__ : Optional[Any] = word.index(_snake_case ,_snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ : int = j
if word[i] == first and i < len(_snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : Tuple = tuple(_snake_case )
lowercase__ : int = new_word
if len(_snake_case ) == 1:
break
else:
lowercase__ : Optional[Any] = get_pairs(_snake_case )
lowercase__ : Union[str, Any] = '''@@ '''.join(_snake_case )
lowercase__ : List[Any] = word[:-4]
lowercase__ : int = word
return word
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = []
lowercase__ : int = re.findall(r'''\S+\n?''' ,_snake_case )
for token in words:
split_tokens.extend(list(self.bpe(_snake_case ).split(''' ''' ) ) )
return split_tokens
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
return self.encoder.get(_snake_case ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(_snake_case ,self.unk_token )
def UpperCAmelCase ( self : List[str] ,_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = ''' '''.join(_snake_case ).replace('''@@ ''' ,'''''' ).strip()
return out_string
def UpperCAmelCase ( self : Dict ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : int = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : Dict = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_snake_case ,ensure_ascii=_snake_case ) + '''\n''' )
lowercase__ : Optional[int] = 0
with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _snake_case : _snake_case[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowercase__ : List[Any] = token_index
writer.write(''' '''.join(_snake_case ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 16 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
if "model" in orig_key:
lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
lowercase__ : str = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
lowercase__ : Optional[Any] = '''yoso.''' + orig_key
return orig_key
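# Example of the resulting mapping (assuming a checkpoint key of this shape):
#   "model.transformer_0.mha.W_q.weight" -> "yoso.encoder.layer.0.attention.self.query.weight"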
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowercase__ : Tuple = val
lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict''']
lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase )
lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase )
lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase )
print(model.load_state_dict(__lowerCamelCase ) )
model.eval()
model.save_pretrained(__lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__( self : int ,_snake_case : Union[str, Any] ,_snake_case : Tuple=12 ,_snake_case : Optional[int]=7 ,_snake_case : List[Any]=True ,_snake_case : int=True ,_snake_case : Optional[int]=True ,_snake_case : Union[str, Any]=99 ,_snake_case : int=32 ,_snake_case : int=32 ,_snake_case : List[Any]=2 ,_snake_case : int=4 ,_snake_case : Union[str, Any]=37 ,_snake_case : Dict=0.1 ,_snake_case : int=0.1 ,_snake_case : int=512 ,_snake_case : Tuple=0.02 ,_snake_case : Optional[Any]=0 ,_snake_case : Union[str, Any]=None ,) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = parent
lowercase__ : str = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : Optional[Any] = is_training
lowercase__ : Dict = use_input_mask
lowercase__ : Union[str, Any] = use_labels
lowercase__ : str = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : Optional[Any] = projection_dim
lowercase__ : str = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = dropout
lowercase__ : Optional[int] = attention_dropout
lowercase__ : List[str] = max_position_embeddings
lowercase__ : Tuple = initializer_range
lowercase__ : Union[str, Any] = scope
lowercase__ : Union[str, Any] = bos_token_id
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : Any = None
if self.use_input_mask:
lowercase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowercase__ : Any = input_mask.numpy()
lowercase__ , lowercase__ : List[str] = input_mask.shape
lowercase__ : Tuple = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
lowercase__ : Union[str, Any] = 1
lowercase__ : Any = 0
lowercase__ : str = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Dict ,_snake_case : List[Any] ,_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[Any] = TFBlipTextModel(config=_snake_case )
lowercase__ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,training=_snake_case )
lowercase__ : List[Any] = model(_snake_case ,training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : int = (TFBlipTextModel,) if is_tf_available() else ()
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Union[str, Any] = False
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = BlipTextModelTester(self )
lowercase__ : List[Any] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
def UpperCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
@slow
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : List[str]=True ) -> Optional[int]:
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 16 |
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
    with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
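    # ord("A") is 65, so ord(letter) - 64 maps "A".."Z" to their 1-based alphabet positions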
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowercase__ : List[str] = 0
return total_score
if __name__ == "__main__":
print(solution())
| 16 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Tuple = MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase : Any = TF_MODEL_FOR_MASKED_LM_MAPPING
def UpperCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''tf''' )
lowercase__ : List[str] = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_snake_case ,decimals=6 ) ,[
{'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 38_015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 25_506, '''token_str''': ''' accuser'''},
] ,)
lowercase__ : List[str] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_snake_case ,decimals=6 ) ,[
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1e-05,
'''token''': 38_015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1e-05,
'''token''': 25_506,
'''token_str''': ''' accuser''',
},
] ,)
lowercase__ : Any = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=6 ) ,[
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13_606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2_941, '''token_str''': ''' Te'''},
] ,)
@require_torch
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''pt''' )
lowercase__ : Any = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_snake_case ,decimals=6 ) ,[
{'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS'''},
] ,)
lowercase__ : int = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_snake_case ,decimals=6 ) ,[
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2e-05,
'''token''': 35_676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS'''},
] ,)
lowercase__ : Optional[int] = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=6 ) ,[
{'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 2_941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13_606, '''token_str''': ''' Clara'''},
] ,)
lowercase__ : Any = unmasker('''My name is <mask> <mask>''' ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=6 ) ,[
[
{
'''score''': 2.2e-05,
'''token''': 35_676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2e-05,
'''token''': 35_676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] ,)
@require_torch_gpu
def UpperCAmelCase ( self : str ) -> int:
"""simple docstring"""
lowercase__ : List[str] = pipeline('''fill-mask''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,device=0 ,framework='''pt''' )
# convert model to fp16
pipe.model.half()
lowercase__ : Union[str, Any] = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_snake_case ,_snake_case )
@slow
@require_torch
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ : Dict = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''pt''' )
self.run_large_test(_snake_case )
@slow
@require_tf
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''tf''' )
self.run_large_test(_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Tuple = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_snake_case ) ,[
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1_573, '''token_str''': ''' Chris'''},
] ,)
lowercase__ : int = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_snake_case ) ,[
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 2_201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 12_790,
'''token_str''': ''' Lyon''',
},
] ,)
lowercase__ : Dict = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 )
self.assertEqual(
nested_simplify(_snake_case ) ,[
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 13_606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2_941, '''token_str''': ''' Te'''},
] ,)
@require_torch
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ : str = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''pt''' )
lowercase__ : Tuple = None
lowercase__ : str = None
self.run_pipeline_test(_snake_case ,[] )
@require_tf
def UpperCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Union[str, Any] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''tf''' )
lowercase__ : str = None
lowercase__ : List[Any] = None
self.run_pipeline_test(_snake_case ,[] )
def UpperCAmelCase ( self : str ,_snake_case : Optional[int] ,_snake_case : Any ,_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
lowercase__ : List[str] = FillMaskPipeline(model=_snake_case ,tokenizer=_snake_case )
lowercase__ : List[str] = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def UpperCAmelCase ( self : List[Any] ,_snake_case : Union[str, Any] ,_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = fill_masker.tokenizer
lowercase__ : List[Any] = fill_masker.model
lowercase__ : str = fill_masker(
f"""This is a {tokenizer.mask_token}""" ,)
self.assertEqual(
_snake_case ,[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
] ,)
lowercase__ : int = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_snake_case ,[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
] ,)
lowercase__ : Optional[Any] = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_snake_case ,[
[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
],
[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
],
] ,)
with self.assertRaises(_snake_case ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_snake_case ):
fill_masker('''This is''' )
self.run_test_top_k(_snake_case ,_snake_case )
self.run_test_targets(_snake_case ,_snake_case )
self.run_test_top_k_targets(_snake_case ,_snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(_snake_case ,_snake_case )
self.fill_mask_with_multiple_masks(_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ,_snake_case : Any ,_snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = tokenizer.get_vocab()
lowercase__ : List[Any] = sorted(vocab.keys() )[:2]
# Pipeline argument
lowercase__ : Dict = FillMaskPipeline(model=_snake_case ,tokenizer=_snake_case ,targets=_snake_case )
lowercase__ : Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_snake_case ,[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
] ,)
lowercase__ : Tuple = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} ,_snake_case )
lowercase__ : Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} ,set(_snake_case ) )
# Call argument
lowercase__ : str = FillMaskPipeline(model=_snake_case ,tokenizer=_snake_case )
lowercase__ : Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=_snake_case )
self.assertEqual(
_snake_case ,[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
] ,)
lowercase__ : int = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} ,_snake_case )
lowercase__ : Dict = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} ,set(_snake_case ) )
# Score equivalence
lowercase__ : Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=_snake_case )
lowercase__ : Tuple = [top_mask['''token_str'''] for top_mask in outputs]
lowercase__ : str = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_snake_case ) == set(_snake_case ):
lowercase__ : Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=_snake_case )
lowercase__ : Optional[int] = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_snake_case ) ,nested_simplify(_snake_case ) )
# Raises with invalid
with self.assertRaises(_snake_case ):
lowercase__ : str = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_snake_case ):
lowercase__ : str = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=[''''''] )
with self.assertRaises(_snake_case ):
lowercase__ : Tuple = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets='''''' )
def UpperCAmelCase ( self : Tuple ,_snake_case : Any ,_snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = FillMaskPipeline(model=_snake_case ,tokenizer=_snake_case ,top_k=2 )
lowercase__ : List[str] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_snake_case ,[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
] ,)
lowercase__ : Optional[Any] = FillMaskPipeline(model=_snake_case ,tokenizer=_snake_case )
lowercase__ : Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" ,top_k=2 )
self.assertEqual(
_snake_case ,[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
] ,)
self.assertEqual(nested_simplify(_snake_case ) ,nested_simplify(_snake_case ) )
def UpperCAmelCase ( self : Any ,_snake_case : Optional[int] ,_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = tokenizer.get_vocab()
lowercase__ : Optional[int] = FillMaskPipeline(model=_snake_case ,tokenizer=_snake_case )
# top_k=2, ntargets=3
lowercase__ : Tuple = sorted(vocab.keys() )[:3]
lowercase__ : int = fill_masker(f"""This is a {tokenizer.mask_token}""" ,top_k=2 ,targets=_snake_case )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        lowercase__ : str = [el['''token_str'''] for el in sorted(_snake_case ,key=lambda _snake_case : _snake_case["score"] ,reverse=_snake_case )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_snake_case ).issubset(_snake_case ):
lowercase__ : Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" ,top_k=3 ,targets=_snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_snake_case ) ,nested_simplify(_snake_case ) )
def UpperCAmelCase ( self : List[str] ,_snake_case : List[Any] ,_snake_case : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Union[str, Any] = FillMaskPipeline(model=_snake_case ,tokenizer=_snake_case )
lowercase__ : Tuple = tokenizer.get_vocab()
# String duplicates + id duplicates
lowercase__ : Optional[int] = sorted(vocab.keys() )[:3]
lowercase__ : List[str] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
lowercase__ : Optional[int] = fill_masker(f"""My name is {tokenizer.mask_token}""" ,targets=_snake_case ,top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_snake_case ) ,3 )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = FillMaskPipeline(model=_snake_case ,tokenizer=_snake_case )
lowercase__ : int = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" ,top_k=2 )
self.assertEqual(
_snake_case ,[
[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
],
[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
],
[
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
{'''sequence''': ANY(_snake_case ), '''score''': ANY(_snake_case ), '''token''': ANY(_snake_case ), '''token_str''': ANY(_snake_case )},
],
] ,)
| 16 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__(self ,images: Union[str, List[str], "Image", List["Image"]] ,**kwargs ) -> Optional[Any]:
        """simple docstring"""
        return super().__call__(images ,**kwargs )
    def _sanitize_parameters(self ,**kwargs ) -> List[Any]:
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self ,image ,candidate_labels=None ,hypothesis_template="This is a photo of {}." ) -> List[str]:
        """simple docstring"""
        image = load_image(image )
        inputs = self.image_processor(images=[image] ,return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences ,return_tensors=self.framework ,padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self ,model_inputs ) -> List[Any]:
        """simple docstring"""
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] ,UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs ,**model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self ,model_outputs ) -> Any:
        """simple docstring"""
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores ,list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits ,axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores ,candidate_labels ) ,key=lambda x: -x[0] )
        ]
        return result
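# Hedged usage sketch (added for illustration; user-side code, not part of
# the pipeline module itself). The CLIP checkpoint and the image URL are
# assumptions picked purely for demonstration.
def _zero_shot_image_classification_demo():
    from transformers import pipeline
    classifier = pipeline(
        "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
    )
    # Returns a list of {"score": float, "label": str} dicts sorted by score.
    return classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["two cats", "a dog", "a plane"],
        hypothesis_template="This is a photo of {}.",
    )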
| 16 | 1 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
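# Hedged illustration (added; not part of the original example): a minimal,
# training-free sketch of what `find_executable_batch_size` does. It keeps
# re-invoking the decorated function, halving `batch_size` after every
# out-of-memory failure, until the body finishes. The 256 threshold below is
# an arbitrary stand-in for "the largest batch that fits".
def _find_executable_batch_size_demo():
    attempts = []
    @find_executable_batch_size(starting_batch_size=1024)
    def probe(batch_size):
        attempts.append(batch_size)
        if batch_size > 256:  # pretend anything bigger exhausts GPU memory
            raise RuntimeError("CUDA out of memory.")  # message the decorator recognizes
        return batch_size
    return probe(), attempts  # expected: (256, [1024, 512, 256])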
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 16 |
"""simple docstring"""
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge, and src/dst/weight inputs above
# 0 INF INF
# INF 0 2
# INF 1 0
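# Hedged programmatic example (added): the 3-vertex session shown above,
# rebuilt without input(). Defined but not invoked so the interactive flow
# stays unchanged; call floyd_warshall_demo() to try it.
def floyd_warshall_demo():
    demo_graph = [[float("inf")] * 3 for _ in range(3)]
    for i in range(3):
        demo_graph[i][i] = 0.0
    demo_graph[1][2] = 2.0  # edge 1: vertex 1 -> vertex 2, weight 2
    demo_graph[2][1] = 1.0  # edge 2: vertex 2 -> vertex 1, weight 1
    return floyd_warshall(demo_graph, 3)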
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
'''simple docstring'''
def __init__( self : int ,_snake_case : Optional[Any] ,_snake_case : List[str]=3 ,_snake_case : Optional[int]=7 ,_snake_case : int=True ,_snake_case : Tuple=True ,_snake_case : List[Any]=False ,_snake_case : Dict=True ,_snake_case : Tuple=99 ,_snake_case : int=32 ,_snake_case : Optional[Any]=5 ,_snake_case : List[Any]=4 ,_snake_case : Tuple=37 ,_snake_case : Any="gelu" ,_snake_case : Union[str, Any]=0.1 ,_snake_case : Optional[int]=0.1 ,_snake_case : List[str]=512 ,_snake_case : str=16 ,_snake_case : Dict=2 ,_snake_case : List[str]=0.02 ,_snake_case : Any=3 ,_snake_case : Optional[Any]=4 ,_snake_case : List[Any]=None ,) -> List[Any]:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Optional[Any] = seq_length
lowercase__ : Optional[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : List[str] = use_labels
lowercase__ : List[Any] = vocab_size
lowercase__ : str = hidden_size
lowercase__ : int = num_hidden_layers
lowercase__ : List[Any] = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : str = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : Optional[Any] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[str] = initializer_range
lowercase__ : Any = num_labels
lowercase__ : str = num_choices
lowercase__ : Dict = scope
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : int = None
if self.use_input_mask:
lowercase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : int = None
lowercase__ : List[str] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase__ : str = ids_tensor([self.batch_size] ,self.num_choices )
lowercase__ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,pad_token_id=1 ,new_decoder_architecture=_snake_case ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : List[str] ,_snake_case : Any ,_snake_case : str ,_snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = FalconModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,attention_mask=_snake_case )
lowercase__ : Union[str, Any] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : str ,_snake_case : Any ,_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Tuple ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : Any ,_snake_case : Union[str, Any] ,) -> List[str]:
"""simple docstring"""
lowercase__ : str = True
lowercase__ : Dict = FalconModel(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,)
lowercase__ : Optional[Any] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,)
lowercase__ : Dict = model(_snake_case ,attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ,_snake_case : str ,_snake_case : List[Any] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any] ,_snake_case : str ,_snake_case : Tuple ,_snake_case : Union[str, Any] ,_snake_case : str ,) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = FalconForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Any = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : str ,_snake_case : int ,_snake_case : int ,_snake_case : Tuple ,_snake_case : Optional[int] ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : str ,_snake_case : Union[str, Any] ,) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = True
lowercase__ : str = True
lowercase__ : str = FalconForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowercase__ : Any = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,use_cache=_snake_case ,)
lowercase__ : Union[str, Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowercase__ : Any = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase__ : List[str] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append the new tokens to next input_ids and the attention mask
lowercase__ : Union[str, Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase__ : str = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase__ : List[str] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
lowercase__ : List[str] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,past_key_values=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
# select random slice
lowercase__ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase__ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-3 ) )
def UpperCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        self.model_tester = FalconModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=FalconConfig ,hidden_size=37 )
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config ,*inputs )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = 3
lowercase__ : Optional[int] = input_dict['''input_ids''']
lowercase__ : Optional[Any] = input_ids.ne(1 ).to(_snake_case )
lowercase__ : List[str] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : int = FalconForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = '''single_label_classification'''
lowercase__ : Tuple = input_dict['''input_ids''']
lowercase__ : str = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : Union[str, Any] = FalconForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = input_dict['''input_ids''']
lowercase__ : Union[str, Any] = FalconForCausalLM(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(_snake_case ,use_cache=_snake_case )
lowercase__ : int = input_ids.shape[0]
lowercase__ : List[str] = model._convert_to_rw_cache(result.past_key_values )
lowercase__ : List[str] = model._convert_cache_to_standard_format(_snake_case ,_snake_case )
for layer in range(len(_snake_case ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[int] = 3
lowercase__ : Dict = '''multi_label_classification'''
lowercase__ : Optional[int] = input_dict['''input_ids''']
lowercase__ : Dict = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : Tuple = FalconForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_snake_case ,'''use_cache''' ):
return
lowercase__ : Optional[Any] = model_class(_snake_case ).to(_snake_case )
if "use_cache" not in inputs:
lowercase__ : Optional[int] = True
lowercase__ : List[str] = model(**_snake_case )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowercase__ : Tuple = (
getattr(_snake_case ,'''decoder_layers''' ,_snake_case )
or getattr(_snake_case ,'''num_decoder_layers''' ,_snake_case )
or config.num_hidden_layers
)
lowercase__ : Optional[int] = getattr(_snake_case ,'''num_kv_heads''' ,config.num_attention_heads )
lowercase__ : Optional[int] = getattr(_snake_case ,'''d_model''' ,config.hidden_size )
lowercase__ : Union[str, Any] = embed_dim // num_attention_heads
lowercase__ : Union[str, Any] = outputs['''past_key_values''']
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ : Union[str, Any] = inputs['''input_ids'''].shape
for i in range(_snake_case ):
if config.new_decoder_architecture:
lowercase__ : Union[str, Any] = config.num_attention_heads
elif config.multi_query:
lowercase__ : Union[str, Any] = 1
self.assertEqual(len(past_kv[0] ) ,2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowercase__ : Dict = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
lowercase__ : int = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(_snake_case )
lowercase__ : int = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_snake_case )
lowercase__ : List[Any] = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
lowercase__ : Dict = model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=19 )
lowercase__ : Optional[int] = tokenizer.batch_decode(_snake_case )[0]
self.assertEqual(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : List[Any] = FalconForCausalLM.from_pretrained(_snake_case )
model.eval()
model.to(_snake_case )
lowercase__ : Optional[int] = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_snake_case )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=4 )
model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=4 )
model.generate(**_snake_case ,num_beams=2 ,max_new_tokens=4 )
@slow
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowercase__ : Any = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : Optional[Any] = FalconForCausalLM.from_pretrained(_snake_case )
model.eval()
model.to(device=_snake_case )
lowercase__ : Any = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_snake_case )
# Test results are the same with and without cache
lowercase__ : Dict = model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=20 ,use_cache=_snake_case )
lowercase__ : Tuple = model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=20 ,use_cache=_snake_case )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
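# Hedged helper sketch (added; not part of the test suite): the per-layer
# key/value cache shape the cache tests above assert, as a pure function.
# Falcon keeps a single KV head under multi-query attention and the full
# head count under the "new decoder architecture".
def _expected_falcon_kv_shape(
    batch_size, seq_length, hidden_size, num_heads, multi_query=True, new_decoder_architecture=False
):
    per_head_dim = hidden_size // num_heads
    if new_decoder_architecture:
        kv_heads = num_heads
    elif multi_query:
        kv_heads = 1
    else:
        kv_heads = num_heads
    return (batch_size, kv_heads, seq_length, per_head_dim)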
| 16 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor ):
'''simple docstring'''
    def __init__(self ,*args ,**kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead." ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
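# Hedged migration sketch (added): the drop-in replacement the deprecation
# warning above points to. The checkpoint name is an assumption chosen for
# illustration.
def _mobilevit_processor_migration_demo():
    from transformers import MobileViTImageProcessor
    return MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")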
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 16 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification(TaskTemplate ):
'''simple docstring'''
    task: str = field(default="image-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"image": Image()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features(self ,features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] ,ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self ) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
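# Hedged usage sketch (added): aligning this template with a concrete
# dataset's features. The class labels are assumptions chosen for
# illustration.
def _image_classification_template_demo():
    from datasets import ClassLabel, Features, Image
    from datasets.tasks import ImageClassification
    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    template = ImageClassification(image_column="image", label_column="labels")
    # Returns a copy of the template whose label schema carries the dataset's
    # actual class names.
    return template.align_with_features(features)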
| 16 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
        lowercase__ : List[str] = TFAutoModelForSeq2SeqLM.from_pretrained(_snake_case ,from_pt=_snake_case )
        lowercase__ , lowercase__ : List[str] = TFAutoModelForSeq2SeqLM.from_pretrained(
            _snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
        self.assertIsNotNone(_snake_case )
        self.assertIsInstance(_snake_case ,_snake_case )
        lowercase__ : Any = AutoModelForSeq2SeqLM.from_pretrained(_snake_case ,from_tf=_snake_case )
        lowercase__ , lowercase__ : Optional[int] = AutoModelForSeq2SeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
    def test_from_pretrained_identifier(self ) -> Any:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,BertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
    def test_from_identifier_from_model_type(self ) -> List[Any]:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
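# Hedged sketch (added): the cross-framework loading pattern these tests
# exercise, outside of unittest. `SMALL_MODEL_IDENTIFIER` is the tiny test
# checkpoint imported at the top of this file.
def _cross_framework_roundtrip_demo():
    tf_model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
    pt_model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
    # Both views of the checkpoint should report the same parameter count.
    return tf_model.num_parameters(), pt_model.num_parameters()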
| 16 | 1 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj) -> str:
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination) -> dict:
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port=None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
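# Hedged usage sketch (added): `patch_environment` upper-cases the keys,
# sets them for the duration of the `with` block, and removes them on exit.
# The address/port values are arbitrary examples.
def _patch_environment_demo():
    with patch_environment(master_addr="127.0.0.1", master_port="29501"):
        inside = os.environ["MASTER_ADDR"]  # "127.0.0.1"
    outside = os.environ.get("MASTER_ADDR")  # None again if previously unset
    return inside, outside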
| 16 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
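# Hedged cross-check (added): an independent recurrence for the same count.
# For a single tile length t, the number of tilings of an n-unit row with
# black unit squares and tiles of length t satisfies g(n) = g(n-1) + g(n-t)
# with g(n) = 1 for n < t; dropping the all-black tiling and summing over
# t in {2, 3, 4} (red, green, blue) should reproduce the DP above.
def _cross_check(length: int) -> int:
    total = 0
    for tile_length in range(2, 5):
        g = [1] * (length + 1)
        for n in range(tile_length, length + 1):
            g[n] = g[n - 1] + g[n - tile_length]
        total += g[length] - 1
    return total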
| 16 | 1 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase_ = '\\n Text data.\n Second line of data.'
lowerCAmelCase_ = 'file'
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
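# Hedged usage sketch (added): the non-test shape of the API exercised
# above. With `extract_compressed_file=True`, `cached_path` returns the
# path of the extracted file rather than the archive itself.
def _cached_path_extract_demo(archive_path, cache_dir):
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    return cached_path(archive_path, download_config=download_config)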
| 16 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 16 | 1 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    locka = FileLock(str(tmpdir / "foo.lock"))
    lockb = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(".lock")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speecht5 import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechT5Config,
SpeechT5HifiGanConfig,
)
from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speecht5 import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechT5ForSpeechToSpeech,
SpeechT5ForSpeechToText,
SpeechT5ForTextToSpeech,
SpeechT5HifiGan,
SpeechT5Model,
SpeechT5PreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 | 1 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCAmelCase_ = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
lowercase__ : Dict = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase_ = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
lowercase__ : List[str] = list(s_dict.keys() )
for key in keys:
lowercase__ : Any = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowercase__ : Any = new_key.replace(__lowerCamelCase , __lowerCamelCase )
print(f"""{key} -> {new_key}""" )
lowercase__ : Dict = s_dict.pop(__lowerCamelCase )
return s_dict
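# A quick worked example (illustrative, not part of the original script): with
# the mapping above, an OpenAI key such as "decoder.blocks.0.mlp.0.weight" is
# rewritten step by step ("blocks" -> "layers", "mlp.0" -> "fc1") into
# "decoder.layers.0.fc1.weight", matching the Hugging Face parameter names.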
def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]:
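# Builds an output projection whose weights are shared with the embedding
# matrix, i.e. the usual input/output embedding weight-tying trick.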
lowercase__ , lowercase__ : Tuple = emb.weight.shape
lowercase__ : str = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
lowercase__ : List[str] = emb.weight.data
return lin_layer
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> bytes:
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
lowercase__ : Optional[int] = os.path.basename(__lowerCamelCase )
lowercase__ : Dict = url.split('''/''' )[-2]
lowercase__ : Any = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.exists(__lowerCamelCase ) and not os.path.isfile(__lowerCamelCase ):
raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
if os.path.isfile(__lowerCamelCase ):
lowercase__ : List[Any] = open(__lowerCamelCase , '''rb''' ).read()
if hashlib.sha256(__lowerCamelCase ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(__lowerCamelCase ) as source, open(__lowerCamelCase , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=__lowerCamelCase , unit_divisor=10_24 ) as loop:
while True:
lowercase__ : List[Any] = source.read(81_92 )
if not buffer:
break
output.write(__lowerCamelCase )
loop.update(len(__lowerCamelCase ) )
lowercase__ : int = open(__lowerCamelCase , '''rb''' ).read()
if hashlib.sha256(__lowerCamelCase ).hexdigest() != expected_sha256:
raise RuntimeError(
'''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
return model_bytes
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
if ".pt" not in checkpoint_path:
lowercase__ : Any = _download(_MODELS[checkpoint_path] )
else:
lowercase__ : Optional[Any] = torch.load(__lowerCamelCase , map_location='''cpu''' )
lowercase__ : Tuple = original_checkpoint['''dims''']
lowercase__ : Optional[Any] = original_checkpoint['''model_state_dict''']
lowercase__ : Optional[Any] = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(__lowerCamelCase )
rename_keys(__lowerCamelCase )
lowercase__ : List[Any] = True
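# The checkpoint's ``dims`` metadata does not record the feed-forward width,
# so it is read off the first decoder MLP weight instead.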
lowercase__ : Dict = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
lowercase__ : Optional[int] = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=__lowerCamelCase , decoder_ffn_dim=__lowerCamelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
lowercase__ : Any = WhisperForConditionalGeneration(__lowerCamelCase )
lowercase__ , lowercase__ : Any = model.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
if len(__lowerCamelCase ) > 0 and not set(__lowerCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
lowercase__ : int = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowercase__ : Optional[Any] = proj_out_weights
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 16 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["torch", "torchsde"]
def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
| 16 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
lowerCAmelCase_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
lowerCAmelCase_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Any = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Dict = ConvBertTokenizer
def __init__( self : Union[str, Any] ,_snake_case : Tuple=None ,_snake_case : Dict=None ,_snake_case : Union[str, Any]=True ,_snake_case : List[str]="[UNK]" ,_snake_case : str="[SEP]" ,_snake_case : str="[PAD]" ,_snake_case : int="[CLS]" ,_snake_case : int="[MASK]" ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=None ,**_snake_case : List[Any] ,) -> Tuple:
"""simple docstring"""
super().__init__(
_snake_case ,tokenizer_file=_snake_case ,do_lower_case=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,pad_token=_snake_case ,cls_token=_snake_case ,mask_token=_snake_case ,tokenize_chinese_chars=_snake_case ,strip_accents=_snake_case ,**_snake_case ,)
lowercase__ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,_snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,_snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,_snake_case ) != tokenize_chinese_chars
):
lowercase__ : int = getattr(_snake_case ,normalizer_state.pop('''type''' ) )
lowercase__ : Optional[Any] = do_lower_case
lowercase__ : List[str] = strip_accents
lowercase__ : Optional[Any] = tokenize_chinese_chars
lowercase__ : List[str] = normalizer_class(**_snake_case )
lowercase__ : Optional[int] = do_lower_case
def UpperCAmelCase ( self : int ,_snake_case : Dict ,_snake_case : Dict=None ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : Tuple = [self.sep_token_id]
lowercase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self : Optional[int] ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowercase__ : str = self._tokenizer.model.save(_snake_case ,name=_snake_case )
return tuple(_snake_case )
| 16 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase_ = 4
lowerCAmelCase_ = 3
class __A ( A_ ):
'''simple docstring'''
pass
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
for shard in shards:
for i in range(__lowerCamelCase ):
yield {"i": i, "shard": shard}
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : int = int(os.environ['''RANK'''] )
lowercase__ : str = int(os.environ['''WORLD_SIZE'''] )
lowercase__ : List[Any] = ArgumentParser()
parser.add_argument('''--streaming''' , type=__lowerCamelCase )
parser.add_argument('''--local_rank''' , type=__lowerCamelCase )
parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 )
lowercase__ : int = parser.parse_args()
lowercase__ : Optional[Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]}
lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase )
if not streaming:
lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) )
lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase )
lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase )
lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : str = full_size // world_size
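# Ranks lower than the remainder get one extra example, so all ``full_size``
# examples are covered when the dataset does not split evenly across ranks.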
expected_local_size += int(rank < (full_size % world_size) )
lowercase__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 16 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
# Initialise PyTorch model
lowercase__ : List[Any] = RemBertConfig.from_json_file(__lowerCamelCase )
print('''Building PyTorch model from configuration: {}'''.format(str(__lowerCamelCase ) ) )
lowercase__ : Dict = RemBertModel(__lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(__lowerCamelCase ) )
torch.save(model.state_dict() , __lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 16 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "tapas"
def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,**_snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[Any] = intermediate_size
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : Dict = type_vocab_sizes
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase__ : Any = positive_label_weight
lowercase__ : int = num_aggregation_labels
lowercase__ : List[str] = aggregation_loss_weight
lowercase__ : Optional[int] = use_answer_as_supervision
lowercase__ : Optional[Any] = answer_loss_importance
lowercase__ : Union[str, Any] = use_normalized_answer_loss
lowercase__ : str = huber_loss_delta
lowercase__ : str = temperature
lowercase__ : int = aggregation_temperature
lowercase__ : List[Any] = use_gumbel_for_cells
lowercase__ : Tuple = use_gumbel_for_aggregation
lowercase__ : Union[str, Any] = average_approximation_function
lowercase__ : Union[str, Any] = cell_selection_preference
lowercase__ : Any = answer_loss_cutoff
lowercase__ : List[Any] = max_num_rows
lowercase__ : str = max_num_columns
lowercase__ : int = average_logits_per_cell
lowercase__ : str = select_one_column
lowercase__ : str = allow_empty_column_selection
lowercase__ : Any = init_cell_selection_weights_to_zero
lowercase__ : Optional[int] = reset_position_index_per_cell
lowercase__ : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
lowercase__ : Optional[Any] = aggregation_labels
lowercase__ : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels ,_snake_case ):
lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape
lowercase__ : int = (
reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
| 16 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __A :
'''simple docstring'''
def __init__( self : Dict ,_snake_case : Union[str, Any]=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=64 ,_snake_case : Optional[int]=None ) -> int:
"""simple docstring"""
lowercase__ : Any = np.random.default_rng(_snake_case )
lowercase__ : str = length
lowercase__ : List[str] = rng.normal(size=(length,) ).astype(np.float32 )
lowercase__ : Dict = a * self.x + b + rng.normal(scale=0.1 ,size=(length,) ).astype(np.float32 )
def __len__( self : str ) -> int:
"""simple docstring"""
return self.length
def __getitem__( self : Tuple ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class __A ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : Tuple=0 ,_snake_case : Optional[Any]=0 ,_snake_case : Optional[int]=False ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowercase__ : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowercase__ : Tuple = True
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple=None ) -> str:
"""simple docstring"""
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowercase__ : Any = False
return x * self.a[0] + self.b[0]
class __A ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : List[Any]=0 ,_snake_case : Optional[Any]=0 ,_snake_case : Optional[Any]=False ) -> Any:
"""simple docstring"""
super().__init__()
lowercase__ : int = torch.nn.Parameter(torch.tensor(_snake_case ).float() )
lowercase__ : Tuple = torch.nn.Parameter(torch.tensor(_snake_case ).float() )
lowercase__ : Any = True
def UpperCAmelCase ( self : int ,_snake_case : Optional[Any]=None ) -> Tuple:
"""simple docstring"""
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowercase__ : List[str] = False
return x * self.a + self.b
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]:
from datasets import load_dataset
from transformers import AutoTokenizer
lowercase__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : Tuple = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
lowercase__ : str = load_dataset('''csv''' , data_files=__lowerCamelCase )
lowercase__ : Union[str, Any] = datasets['''train'''].unique('''label''' )
lowercase__ : List[Any] = {v: i for i, v in enumerate(__lowerCamelCase )}
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : int = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' )
if "label" in examples:
lowercase__ : str = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Any = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCamelCase , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return tokenizer.pad(__lowerCamelCase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowercase__ : int = DataLoader(tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=2 )
lowercase__ : Optional[Any] = DataLoader(tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=1 )
return train_dataloader, eval_dataloader
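# A hypothetical usage sketch (not part of the original file): the collate_fn
# above closes over a module-level ``accelerator``, so a caller is expected to
# create one first, e.g.
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     train_dataloader, eval_dataloader = __UpperCAmelCase(accelerator, 16)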
| 16 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict:
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]:
"""simple docstring"""
if NLTK_VERSION >= version.Version('''3.6.5''' ):
lowercase__ : int = [
meteor_score.single_meteor_score(
word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
else:
lowercase__ : Tuple = [
meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
return {"meteor": np.mean(_snake_case )}
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' ,return_dict=_snake_case ).to(_snake_case )
lowercase__ : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase__ : Optional[Any] = tokenizer('''Hello there''' ,return_tensors='''pt''' ).input_ids
lowercase__ : List[Any] = tokenizer('''Hi I am''' ,return_tensors='''pt''' ).input_ids
lowercase__ : Optional[Any] = model(input_ids.to(_snake_case ) ,labels=labels.to(_snake_case ) ).loss
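# The model returns the mean per-token cross-entropy, so multiplying by the
# target length recovers the total negative log-likelihood; its negation is
# the sequence score that is compared against the reference value below.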
lowercase__ : Tuple = -(labels.shape[-1] * loss.item())
lowercase__ : List[str] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 16 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCAmelCase_ = {
'facebook/xglm-564M': 2_048,
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None:
"""simple docstring"""
lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase__ : Any = 7
lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
lowercase__ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[int] = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase__ : List[str] = len(self.sp_model )
lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_snake_case )
lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = self.__dict__.copy()
lowercase__ : Optional[int] = None
lowercase__ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : int = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : Dict = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase__ : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
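# i.e. a single sequence is encoded as ``</s> X`` and a pair as
# ``</s> A </s></s> B``; XGLM reuses the separator token as the prefix since
# no dedicated BOS token is prepended here.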
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case ))
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case ))
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_snake_case ,out_type=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip()
return out_string
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Any = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,'''wb''' ) as fi:
lowercase__ : Dict = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
| 16 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
lowercase__ : Union[str, Any] = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
lowercase__ : str = hex_num[0] == '''-'''
if is_negative:
lowercase__ : Union[str, Any] = hex_num[1:]
try:
lowercase__ : Tuple = int(__lowerCamelCase , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
lowercase__ : Union[str, Any] = ''''''
while int_num > 0:
lowercase__ : Any = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
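# A worked example (illustrative only): for input "AC", int("AC", 16) is 172,
# whose binary digits are built up low bit first as "10101100", so the
# function returns the integer 10101100 (prefixed with '-' for negative input).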
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]:
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase )
else:
lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase )
if hidden_sizes == 1_92:
lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase )
if hidden_sizes == 2_56:
lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase )
if hidden_sizes == 3_84:
lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase )
from_model.eval()
lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval()
lowercase__ : str = OrderedDict()
lowercase__ : int = from_model.state_dict()
lowercase__ : Dict = list(from_model.state_dict().keys() )
lowercase__ : Any = list(our_model.state_dict().keys() )
print(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowerCamelCase )
lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) )
lowercase__ : Optional[int] = from_model(__lowerCamelCase )
lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one."
lowercase__ : Any = name
print(__lowerCamelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase__ : int = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]:
lowercase__ : Any = '''imagenet-1k-id2label.json'''
lowercase__ : Tuple = 10_00
lowercase__ : Dict = (1, num_labels)
lowercase__ : List[str] = '''huggingface/label-files'''
lowercase__ : str = num_labels
lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Union[str, Any] = idalabel
lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()}
lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
lowercase__ : Tuple = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
lowercase__ : Any = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert; it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
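# The converter above copies weights positionally: the i-th timm tensor is
# assigned to the i-th HF key, relying on both state dicts enumerating in the
# same order. A toy sketch of that idea with fake tensors (keys illustrative):
import torch
from collections import OrderedDict

def copy_state_dict_by_position(src_sd, dst_keys):
    return OrderedDict((dst_key, src_sd[src_key]) for src_key, dst_key in zip(src_sd, dst_keys))

toy_src = OrderedDict([("blocks.0.w", torch.zeros(2, 2)), ("blocks.0.b", torch.zeros(2))])
toy_dst = copy_state_dict_by_position(toy_src, ["encoder.0.weight", "encoder.0.bias"])
assert list(toy_dst) == ["encoder.0.weight", "encoder.0.bias"]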
| 16 | 1 |
"""simple docstring"""
lowerCAmelCase_ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 16 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : List[str]
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="Translation" ,init=A_ ,repr=A_ )
def __call__( self : List[str] ) -> Any:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : Optional[List] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="TranslationVariableLanguages" ,init=A_ ,repr=A_ )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = sorted(set(self.languages ) ) if self.languages else None
lowercase__ : Dict = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = set(self.languages )
if self.languages and set(_snake_case ) - lang_set:
raise ValueError(
f"""Some languages in example ({", ".join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({", ".join(_snake_case )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowercase__ : str = []
for lang, text in translation_dict.items():
if isinstance(_snake_case ,_snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowercase__ , lowercase__ : Optional[Any] = zip(*sorted(_snake_case ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
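# A standalone sketch of the flatten-and-sort step performed by the
# variable-languages encoder above; the function name is illustrative and the
# sort is lexicographic over (language, text) pairs, as with `sorted` above.
def encode_translations_sketch(translation_dict):
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}

assert encode_translations_sketch({"en": "the cat", "fr": ["le chat", "la chatte"]}) == {
    "language": ["en", "fr", "fr"],
    "translation": ["the cat", "la chatte", "le chat"],
}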
| 16 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
lowercase__ : List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCamelCase ):
# looping through rows of graph array
for i in range(__lowerCamelCase ):
# looping through columns of graph array
for j in range(__lowerCamelCase ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowercase__ : str = dist[i][k] + dist[k][j]
_print_dist(__lowerCamelCase , __lowerCamelCase )
return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('Enter number of vertices: '))
lowerCAmelCase_ = int(input('Enter number of edges: '))
lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase_ = int(input('Enter source:'))
lowerCAmelCase_ = int(input('Enter destination:'))
lowerCAmelCase_ = float(input('Enter weight:'))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
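# A non-interactive sketch of the same triple-loop relaxation on a hard-coded
# 3-vertex graph (the script above reads its graph from stdin). Float 'inf'
# arithmetic makes the explicit infinity guards above optional here.
def floyd_warshall_sketch(adj):
    n = len(adj)
    dist = [row[:] for row in adj]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist

_INF = float("inf")
assert floyd_warshall_sketch(
    [[0, 2, _INF], [_INF, 0, 3], [1, _INF, 0]]
) == [[0, 2, 5], [4, 0, 3], [1, 3, 0]]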
| 16 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : int = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : str = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : int = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : List[Any] = 2
# Initialize accelerator
lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : str = config['''lr''']
lowercase__ : str = int(config['''num_epochs'''] )
lowercase__ : Optional[int] = int(config['''seed'''] )
lowercase__ : Tuple = int(config['''batch_size'''] )
lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : Dict = model(**__lowerCamelCase )
lowercase__ : List[Any] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Any = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
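# A minimal sketch of the decorator's retry behavior, simulated on CPU: the
# wrapped function is re-run with a halved batch size whenever it raises an
# out-of-memory style error. The "CUDA out of memory." message is what recent
# `accelerate` versions match on; the 16-sample threshold below is made up.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def train_sketch(batch_size):
    # Pretend anything above 16 samples per batch exhausts device memory.
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory. (simulated)")
    print(f"training succeeded with batch_size={batch_size}")

train_sketch()  # retries 64 -> 32 -> 16, then prints the success line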
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
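# A tiny standalone sketch of the _LazyModule idea used above: attribute access
# triggers the real import only on first use. This illustrates the pattern and
# is not the transformers implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only reached when normal lookup fails, i.e. on the first access.
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy_math = LazyModuleSketch("lazy_math", {"math": ["sqrt", "pi"]})
assert lazy_math.sqrt(9.0) == 3.0  # `math` was imported here, not at construction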
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : Optional[int] = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : str = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple:
lowercase__ : List[str] = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def __UpperCAmelCase ( ) -> Optional[int]:
lowercase__ : List[str] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
lowercase__ : List[Any] = '''imagenet-1k-id2label.json'''
lowercase__ : Optional[Any] = 10_00
lowercase__ : Optional[Any] = '''huggingface/label-files'''
lowercase__ : Dict = num_labels
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
lowercase__ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : str = {v: k for k, v in idalabel.items()}
lowercase__ : Any = CvtConfig(num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
lowercase__ : int = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
lowercase__ : int = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : List[Any] = [2, 2, 20]
lowercase__ : Any = [3, 12, 16]
lowercase__ : Tuple = [1_92, 7_68, 10_24]
lowercase__ : List[Any] = CvtForImageClassification(__lowerCamelCase )
lowercase__ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
lowercase__ : List[str] = image_size
lowercase__ : Union[str, Any] = torch.load(__lowerCamelCase , map_location=torch.device('''cpu''' ) )
lowercase__ : int = OrderedDict()
lowercase__ : List[Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Any = list_of_state_dict + cls_token(__lowerCamelCase )
lowercase__ : Any = list_of_state_dict + embeddings(__lowerCamelCase )
for cnt in range(config.depth[idx] ):
lowercase__ : Tuple = list_of_state_dict + attention(__lowerCamelCase , __lowerCamelCase )
lowercase__ : List[Any] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowerCamelCase )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : Optional[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
image_processor.save_pretrained(__lowerCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Path to the CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
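# The converter above first builds an explicit (new_key, old_key) rename table
# and then copies tensors one by one. A toy sketch of that two-phase pattern
# (the keys and shapes here are illustrative, not a real CvT checkpoint):
import torch
from collections import OrderedDict

toy_rename_table = [
    ("cvt.encoder.stages.0.embedding.projection.weight", "stage0.patch_embed.proj.weight"),
    ("cvt.encoder.stages.0.embedding.projection.bias", "stage0.patch_embed.proj.bias"),
]
toy_old_weights = {
    "stage0.patch_embed.proj.weight": torch.zeros(4, 3, 7, 7),
    "stage0.patch_embed.proj.bias": torch.zeros(4),
}
toy_new_state_dict = OrderedDict((new, toy_old_weights[old]) for new, old in toy_rename_table)
assert list(toy_new_state_dict)[0].startswith("cvt.encoder")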
| 16 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Any ,*_snake_case : int ,**_snake_case : str ) -> None:
"""simple docstring"""
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
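# A generic sketch of the deprecation-shim pattern above: the old class stays
# importable, warns on construction, and otherwise behaves exactly like its
# replacement (class names here are illustrative).
import warnings

class NewProcessorSketch:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractorSketch(NewProcessorSketch):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractorSketch is deprecated; use NewProcessorSketch instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

assert OldFeatureExtractorSketch(size=256).size == 256  # still works, but warns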
| 16 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1:
raise ValueError(
'''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowercase__ : Tuple = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
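# A standalone sketch of the loop above with its expected output pinned in an
# assertion (illustrative function name; note the trailing space per term):
def fizz_buzz_sketch(number: int, iterations: int) -> str:
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if number % 3 and number % 5:
            out += str(number)
        number += 1
        out += " "
    return out

assert fizz_buzz_sketch(1, 15) == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "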
| 16 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError('''Input value must be an \'int\' type''' )
lowercase__ : List[str] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
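# A standalone sketch of the shift loop above: it computes the 1-indexed
# position of the highest set bit, which coincides with int.bit_length().
def highest_set_bit_position_sketch(number: int) -> int:
    position = 0
    while number:
        position += 1
        number >>= 1
    return position

assert highest_set_bit_position_sketch(0b1000) == 4 == (0b1000).bit_length()
assert highest_set_bit_position_sketch(0) == 0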
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : Tuple = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Optional[Any] = len(_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
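# A generic sketch of the logits-pinning pattern used by the slow test above:
# compare a small slice of the output against hard-coded reference values with
# an absolute tolerance. The numbers below are toy values, not model output,
# and the sketch assumes TensorFlow is installed, as the tests above do.
import numpy as np
import tensorflow as tf

toy_logits = tf.constant([[-11.1068, -9.7876, -8.3778] + [0.0] * 997])
toy_expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
assert toy_logits.shape == (1, 1_000)
assert np.allclose(toy_logits[0, :3].numpy(), toy_expected_slice.numpy(), atol=1e-3)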
| 16 | 1 |
"""simple docstring"""
lowerCAmelCase_ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase_ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase_ = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
assert len(str(__lowerCamelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 and 12"
assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
lowercase__ : Tuple = year // 1_00
lowercase__ : Dict = (5 * (century % 4) + 2) % 7
lowercase__ : List[str] = year % 1_00
lowercase__ : int = centurian % 12
lowercase__ : str = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
lowercase__ : Optional[int] = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
lowercase__ : str = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
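# A standalone sketch of the same doomsday computation, checked against a known
# date (2023-03-14 was a Tuesday). The leap-year test uses the standard
# Gregorian rule; constants are restated locally so the sketch is self-contained.
_DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_WEEK_DAYS = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]

def day_of_week_sketch(year: int, month: int, day: int) -> str:
    century_anchor = (5 * ((year // 100) % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (centurian // 12 + centurian_m + centurian_m // 4 + century_anchor) % 7
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    day_anchor = (_DOOMSDAY_LEAP if is_leap else _DOOMSDAY_NOT_LEAP)[month - 1]
    return _WEEK_DAYS[(dooms_day + day - day_anchor) % 7]

assert day_of_week_sketch(2023, 3, 14) == "Tuesday"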
| 16 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
if "model" in orig_key:
lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
lowercase__ : str = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
lowercase__ : Optional[Any] = '''yoso.''' + orig_key
return orig_key
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowercase__ : Tuple = val
lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict''']
lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase )
lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase )
lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase )
print(model.load_state_dict(__lowerCamelCase ) )
model.eval()
model.save_pretrained(__lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
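# A toy sketch of the chained substring rewrites performed by the converter
# above, reduced to three of its rules (the input key below is made up):
def rename_key_sketch(orig_key: str) -> str:
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key

assert rename_key_sketch("model.transformer_3.W_q.weight") == "yoso.encoder.layer.3.self.query.weight"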
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowercase__ : List[str] = 0
return total_score
if __name__ == "__main__":
print(solution())
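# A standalone sketch of the Project Euler 22 scoring rule implemented above:
# alphabetical rank times the sum of letter values (A=1 ... Z=26).
def total_name_score_sketch(names: list) -> int:
    total = 0
    for rank, name in enumerate(sorted(names), start=1):
        total += rank * sum(ord(letter) - 64 for letter in name.upper())
    return total

# "COLIN" is worth 3 + 15 + 12 + 9 + 14 = 53; it sorts second here, so 2 * 53.
assert total_name_score_sketch(["COLIN", "ANN"]) == (1 * (1 + 14 + 14)) + (2 * 53)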
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__( self : Any ,_snake_case : str ,_snake_case : List[Any]=2 ,_snake_case : List[Any]=8 ,_snake_case : List[Any]=True ,_snake_case : Optional[int]=True ,_snake_case : str=True ,_snake_case : int=True ,_snake_case : str=99 ,_snake_case : Tuple=16 ,_snake_case : List[str]=5 ,_snake_case : Any=2 ,_snake_case : Dict=36 ,_snake_case : Optional[int]="gelu" ,_snake_case : Any=0.0 ,_snake_case : int=0.0 ,_snake_case : Union[str, Any]=512 ,_snake_case : Optional[Any]=16 ,_snake_case : Optional[Any]=2 ,_snake_case : Optional[Any]=0.02 ,_snake_case : Tuple=3 ,_snake_case : List[str]=4 ,_snake_case : Tuple=None ,) -> int:
"""simple docstring"""
lowercase__ : List[Any] = parent
lowercase__ : List[str] = batch_size
lowercase__ : int = seq_length
lowercase__ : Dict = is_training
lowercase__ : List[Any] = use_input_mask
lowercase__ : Tuple = use_token_type_ids
lowercase__ : List[str] = use_labels
lowercase__ : str = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : str = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : int = hidden_act
lowercase__ : int = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Any = max_position_embeddings
lowercase__ : Optional[Any] = type_vocab_size
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : int = initializer_range
lowercase__ : Any = num_labels
lowercase__ : List[Any] = num_choices
lowercase__ : Optional[int] = scope
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : Optional[Any] = None
if self.use_input_mask:
lowercase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[int] = None
if self.use_token_type_ids:
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowercase__ : List[Any] = None
lowercase__ : Tuple = None
lowercase__ : List[Any] = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase__ : int = ids_tensor([self.batch_size] ,self.num_choices )
lowercase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,)
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.get_config()
lowercase__ : int = 300
return config
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ : Optional[int] = True
lowercase__ : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : int ,_snake_case : Optional[Any] ,_snake_case : List[Any] ,_snake_case : Tuple ,_snake_case : Tuple ,_snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = MraModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case )
lowercase__ : Optional[Any] = model(_snake_case ,token_type_ids=_snake_case )
lowercase__ : Union[str, Any] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : int ,_snake_case : Optional[Any] ,_snake_case : Optional[Any] ,_snake_case : List[str] ,_snake_case : Any ,_snake_case : Tuple ,_snake_case : int ,_snake_case : Optional[Any] ,_snake_case : Dict ,_snake_case : Tuple ,) -> Tuple:
"""simple docstring"""
lowercase__ : Any = True
lowercase__ : Optional[Any] = MraModel(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(
_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,)
lowercase__ : int = model(
_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,encoder_hidden_states=_snake_case ,)
lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : str ,_snake_case : int ,_snake_case : Optional[int] ,_snake_case : Tuple ,_snake_case : Tuple ,_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : int = MraForMaskedLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Any ,_snake_case : List[Any] ,_snake_case : int ,_snake_case : Union[str, Any] ,_snake_case : Union[str, Any] ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = MraForQuestionAnswering(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = model(
_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,start_positions=_snake_case ,end_positions=_snake_case ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : str ,_snake_case : List[Any] ,_snake_case : Dict ,_snake_case : str ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Tuple ,_snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Tuple = MraForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Tuple ,_snake_case : Dict ,_snake_case : Any ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any] ,_snake_case : List[Any] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = self.num_labels
lowercase__ : int = MraForTokenClassification(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : int = model(_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Union[str, Any] ,_snake_case : Optional[int] ,_snake_case : Union[str, Any] ,_snake_case : Any ,_snake_case : str ,_snake_case : Optional[int] ,_snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[Any] = self.num_choices
lowercase__ : str = MraForMultipleChoice(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase__ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase__ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase__ : Optional[Any] = model(
_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,labels=_snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ : Tuple = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase : int = False
lowerCAmelCase : int = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Dict = ()
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
lowercase__ : Dict = MraModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ : Any = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_snake_case )
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = MraModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip(reason='''MRA does not output attentions''' )
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ : List[str] = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
lowercase__ : int = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
lowercase__ : int = model(_snake_case )[0]
lowercase__ : Optional[Any] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape ,_snake_case )
lowercase__ : str = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_snake_case ,atol=1e-4 ) )
@slow
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
lowercase__ : Any = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
lowercase__ : int = model(_snake_case )[0]
lowercase__ : Dict = 50_265
lowercase__ : List[str] = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape ,_snake_case )
lowercase__ : Union[str, Any] = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_snake_case ,atol=1e-4 ) )
@slow
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ : int = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
lowercase__ : List[str] = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
lowercase__ : List[str] = model(_snake_case )[0]
lowercase__ : Any = 50_265
lowercase__ : List[str] = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape ,_snake_case )
lowercase__ : Union[str, Any] = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_snake_case ,atol=1e-4 ) )
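# Hedged usage note (assumption: added for illustration, not part of the original
# test file): the @slow integration tests above are skipped by default and only
# run when slow tests are enabled, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/mra -k "Integration"
# (the exact test path is an assumption).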
| 16 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = {}
if "candidate_labels" in kwargs:
lowercase__ : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowercase__ : Optional[Any] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = load_image(_snake_case )
lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework )
lowercase__ : str = candidate_labels
lowercase__ : Dict = [hypothesis_template.format(x ) for x in candidate_labels]
lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case )
lowercase__ : Optional[int] = [text_inputs]
return inputs
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' )
lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] ,_snake_case ):
lowercase__ : List[str] = text_inputs[0]
else:
# Batching case.
lowercase__ : int = text_inputs[0][0]
lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case )
lowercase__ : Union[str, Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Dict = model_outputs.pop('''candidate_labels''' )
lowercase__ : Optional[Any] = model_outputs['''logits'''][0]
if self.framework == "pt":
lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__ : Tuple = probs.tolist()
if not isinstance(_snake_case ,_snake_case ):
lowercase__ : Any = [scores]
elif self.framework == "tf":
lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 )
lowercase__ : Optional[Any] = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
lowercase__ : Union[str, Any] = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda x : -x[0] )
]
return result
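# Hedged usage sketch (assumption: checkpoint name and inputs are illustrative,
# not taken from this file): the pipeline above is typically driven via
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"],
#              hypothesis_template="This is a photo of {}.")
# which returns a list of {"score", "label"} dicts sorted by score, as built in
# the postprocess step above.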
| 16 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='''utf-8''' ,check=_snake_case ,)
assert hasattr(self ,'''env''' )
def UpperCAmelCase ( self : List[Any] ,_snake_case : Any=1 ) -> Any:
"""simple docstring"""
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=f"""{self.env.base_job_name}-single""" ,instance_count=_snake_case ,instance_type=self.instance_type ,debugger_hook_config=_snake_case ,hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version='''py36''' ,)
def UpperCAmelCase ( self : Tuple ,_snake_case : List[Any] ) -> List[Any]:
"""simple docstring"""
TrainingJobAnalytics(_snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
lowercase__ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase__ : Any = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
lowercase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowercase__ : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' ,999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" ,'''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} ,_snake_case )
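# Hedged note (assumption: added for illustration): the pytest.mark.skipif guard
# above means these tests only execute when the environment opts in, e.g.
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker
# (the exact test directory is an assumption).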
| 16 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
lowercase__ : List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCamelCase ):
# looping through rows of graph array
for i in range(__lowerCamelCase ):
# looping through columns of graph array
for j in range(__lowerCamelCase ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowercase__ : str = dist[i][k] + dist[k][j]
_print_dist(__lowerCamelCase , __lowerCamelCase )
return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('Enter number of vertices: '))
lowerCAmelCase_ = int(input('Enter number of edges: '))
lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase_ = int(input('Enter source:'))
lowerCAmelCase_ = int(input('Enter destination:'))
lowerCAmelCase_ = float(input('Enter weight:'))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge, and src/dst/weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
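# Hedged non-interactive sketch (assumption: not part of the original script):
# builds the 3-vertex example from the comments above programmatically, so
# floyd_warshall can be exercised without stdin.
def _example_run() -> None:
    inf = float("inf")
    # adjacency matrix for the example: edge 1 -> 2 with weight 2, edge 2 -> 1 with weight 1
    example_graph = [
        [0.0, inf, inf],
        [inf, 0.0, 2.0],
        [inf, 1.0, 0.0],
    ]
    floyd_warshall(example_graph, 3)  # prints the expected matrix shown above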
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
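# Hedged note (assumption: added for illustration): _LazyModule defers the
# torch-backed imports until first attribute access, e.g.
#   import transformers.models.sew as sew
#   cfg = sew.SEWConfig()   # triggers the real import of configuration_sew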
| 16 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None:
"""simple docstring"""
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
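# Hedged migration sketch (assumption: the checkpoint name is illustrative): the
# drop-in replacement for the deprecated class above is
#   from transformers import MobileViTImageProcessor
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")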
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : int = BigBirdTokenizer
lowerCAmelCase : Any = BigBirdTokenizerFast
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : List[Any] = True
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
lowercase__ : Tuple = self.tokenizer_class(_snake_case ,keep_accents=_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = '''<s>'''
lowercase__ : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
lowercase__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<unk>''' )
self.assertEqual(vocab_keys[1] ,'''<s>''' )
self.assertEqual(vocab_keys[-1] ,'''[MASK]''' )
self.assertEqual(len(_snake_case ) ,1_004 )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size ,1_000 )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : int = self.get_rust_tokenizer()
lowercase__ : List[Any] = '''I was born in 92000, and this is falsé.'''
lowercase__ : Dict = tokenizer.tokenize(_snake_case )
lowercase__ : Any = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
lowercase__ : Any = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
lowercase__ : Optional[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : Dict = tokenizer.encode(_snake_case )
lowercase__ : Tuple = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = BigBirdTokenizer(_snake_case ,keep_accents=_snake_case )
lowercase__ : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) ,[285, 46, 10, 170, 382] ,)
lowercase__ : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_snake_case ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
lowercase__ : Optional[int] = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ,)
lowercase__ : str = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
@cached_property
def UpperCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = '''Hello World!'''
lowercase__ : Union[str, Any] = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(_snake_case ,self.big_tokenizer.encode(_snake_case ) )
@slow
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
lowercase__ : int = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_snake_case ,self.big_tokenizer.encode(_snake_case ) )
@require_torch
@slow
def UpperCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase__ : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase__ : Union[str, Any] = ''' '''.join(_snake_case )
lowercase__ : int = self.big_tokenizer.encode_plus(_snake_case ,return_tensors='''pt''' ,return_token_type_ids=_snake_case )
lowercase__ : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] ,return_tensors='''pt''' ,return_token_type_ids=_snake_case )
lowercase__ : Tuple = BigBirdConfig(attention_type='''original_full''' )
lowercase__ : Any = BigBirdModel(_snake_case )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_snake_case )
model(**_snake_case )
@slow
def UpperCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
lowercase__ : int = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Union[str, Any] = {'''input_ids''': [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case ,model_name='''google/bigbird-roberta-base''' ,revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' ,)
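# Hedged usage sketch (assumption: distilled from the decode test above, for
# illustration only):
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   tok.decode(tok("Paris is the [MASK].").input_ids)
#   # -> '[CLS] Paris is the[MASK].[SEP]'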
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 16 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCAmelCase_ = None
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
lowerCAmelCase_ = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
lowerCAmelCase_ = '▁'
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[int] = AlbertTokenizer
def __init__( self : List[Any] ,_snake_case : Tuple=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[Any]=True ,_snake_case : int=True ,_snake_case : Dict=False ,_snake_case : Optional[int]="[CLS]" ,_snake_case : List[Any]="[SEP]" ,_snake_case : Tuple="<unk>" ,_snake_case : int="[SEP]" ,_snake_case : List[Any]="<pad>" ,_snake_case : Optional[int]="[CLS]" ,_snake_case : int="[MASK]" ,**_snake_case : Optional[Any] ,) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = (
AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ,normalized=_snake_case )
if isinstance(_snake_case ,_snake_case )
else mask_token
)
super().__init__(
_snake_case ,tokenizer_file=_snake_case ,do_lower_case=_snake_case ,remove_space=_snake_case ,keep_accents=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,pad_token=_snake_case ,cls_token=_snake_case ,mask_token=_snake_case ,**_snake_case ,)
lowercase__ : Optional[int] = do_lower_case
lowercase__ : str = remove_space
lowercase__ : Dict = keep_accents
lowercase__ : List[str] = vocab_file
lowercase__ : str = False if not self.vocab_file else True
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : Union[str, Any] = [self.sep_token_id]
lowercase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase ( self : Dict ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : List[str] = [self.sep_token_id]
lowercase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self : List[str] ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : List[Any] = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file ,_snake_case )
return (out_vocab_file,)
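# Hedged usage sketch (assumption: token ids are illustrative): the two helpers
# above produce ALBERT's single- and pair-sequence formats, e.g.
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tok.build_inputs_with_special_tokens([7, 8])           # [CLS] 7 8 [SEP]
#   tok.create_token_type_ids_from_sequences([7, 8], [9])  # [0, 0, 0, 0, 1, 1]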
| 16 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
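# Hedged usage sketch (assumption: illustrative): the from_pt/from_tf flags
# exercised above convert a checkpoint across frameworks at load time, e.g.
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
#   pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)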
| 16 | 1 |
"""simple docstring"""
from functools import lru_cache
def __UpperCAmelCase ( __lowerCamelCase ) -> set:
lowercase__ : Optional[Any] = 2
lowercase__ : Any = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__lowerCamelCase )
if n > 1:
factors.add(__lowerCamelCase )
return factors
@lru_cache
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
return len(unique_prime_factors(__lowerCamelCase ) )
def __UpperCAmelCase ( __lowerCamelCase ) -> bool:
return len(set(__lowerCamelCase ) ) in (0, 1)
def __UpperCAmelCase ( __lowerCamelCase ) -> list:
lowercase__ : str = 2
while True:
# Increment each value of a generated range
lowercase__ : Any = [base + i for i in range(__lowerCamelCase )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
lowercase__ : List[Any] = [upf_len(x ) for x in group]
checker.append(__lowerCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(__lowerCamelCase ):
return group
# Increment our base variable by 1
base += 1
def __UpperCAmelCase ( __lowerCamelCase = 4 ) -> int:
lowercase__ : Dict = run(__lowerCamelCase )
return results[0] if len(__lowerCamelCase ) else None
if __name__ == "__main__":
print(solution())
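# Hedged sanity checks (assumption: added for illustration; the values are the
# published Project Euler 47 answers for small n):
#   unique_prime_factors(100) == {2, 5}
#   solution(2) == 14    # 14 = 2*7, 15 = 3*5
#   solution(3) == 644   # 644, 645, 646 each have three distinct prime factors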
| 16 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase = 50 ) -> int:
lowercase__ : int = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
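# Hedged sanity check (assumption: added for illustration): Project Euler 116
# counts 7 red (length-2), 3 green (length-3) and 2 blue (length-4) tilings of a
# row of length five, so solution(5) should return 12.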
| 16 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[str] = KandinskyVaaPriorPipeline
lowerCAmelCase : str = ["prompt"]
lowerCAmelCase : List[str] = ["prompt", "negative_prompt"]
lowerCAmelCase : List[str] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowerCAmelCase : str = False
@property
def UpperCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
return 100
@property
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowercase__ : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModelWithProjection(_snake_case )
@property
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Optional[int] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
lowercase__ : Tuple = PriorTransformer(**_snake_case )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
lowercase__ : Union[str, Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=224 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=14 ,)
lowercase__ : Any = CLIPVisionModelWithProjection(_snake_case )
return model
@property
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = CLIPImageProcessor(
crop_size=224 ,do_center_crop=_snake_case ,do_normalize=_snake_case ,do_resize=_snake_case ,image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] ,image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] ,resample=3 ,size=224 ,)
return image_processor
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[str] = self.dummy_prior
lowercase__ : Optional[Any] = self.dummy_image_encoder
lowercase__ : Optional[int] = self.dummy_text_encoder
lowercase__ : Optional[int] = self.dummy_tokenizer
lowercase__ : Tuple = self.dummy_image_processor
lowercase__ : Dict = UnCLIPScheduler(
variance_type='''fixed_small_log''' ,prediction_type='''sample''' ,num_train_timesteps=1_000 ,clip_sample=_snake_case ,clip_sample_range=10.0 ,)
lowercase__ : Optional[Any] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : int=0 ) -> Optional[int]:
"""simple docstring"""
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : Optional[int] = torch.manual_seed(_snake_case )
else:
lowercase__ : str = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : str = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Dict = '''cpu'''
lowercase__ : Optional[int] = self.get_dummy_components()
lowercase__ : Optional[int] = self.pipeline_class(**_snake_case )
lowercase__ : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Optional[Any] = pipe(**self.get_dummy_inputs(_snake_case ) )
lowercase__ : List[Any] = output.image_embeds
lowercase__ : Dict = pipe(
**self.get_dummy_inputs(_snake_case ) ,return_dict=_snake_case ,)[0]
lowercase__ : Any = image[0, -10:]
lowercase__ : int = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
lowercase__ : Optional[Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
lowercase__ : Any = torch_device == '''cpu'''
lowercase__ : Optional[Any] = True
lowercase__ : int = False
self._test_inference_batch_single_identical(
test_max_difference=_snake_case ,relax_max_difference=_snake_case ,test_mean_pixel_difference=_snake_case ,)
@skip_mps
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : str = torch_device == '''cpu'''
lowercase__ : Union[str, Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_snake_case ,test_mean_pixel_difference=_snake_case ,)
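# Hedged usage sketch (assumption: the hub checkpoint and settings are
# illustrative): the full-size prior pipeline is typically loaded as
#   pipe = KandinskyVaaPriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior")
#   out = pipe("horse", guidance_scale=4.0, num_inference_steps=25)
#   image_embeds = out.image_embeds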
| 16 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 16 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCAmelCase_ = {
'facebook/xglm-564M': 2_048,
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None:
"""simple docstring"""
lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase__ : Any = 7
lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
lowercase__ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[int] = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase__ : List[str] = len(self.sp_model )
lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_snake_case )
lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = self.__dict__.copy()
lowercase__ : Optional[int] = None
lowercase__ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : int = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : Dict = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase__ : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case ))
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case ))
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_snake_case ,out_type=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip()
return out_string
def UpperCAmelCase ( self : Any ,save_directory : str ,filename_prefix : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file ,'''wb''' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
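# Hedged sketch of the fairseq alignment implemented above: ids 0-3 are the
# special tokens, every SentencePiece id is shifted by a fixed offset
# (assumed to be 1 here), and unknown pieces fall back to <unk>. All names
# and the toy vocabulary below are illustrative assumptions.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1
sp_vocab = {"▁hello": 10}  # pretend SentencePiece piece -> id table

def token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = sp_vocab.get(token, 0)  # SentencePiece returns 0 for unknown pieces
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert token_to_id("<pad>") == 1
assert token_to_id("▁hello") == 11
assert token_to_id("▁unseen") == 3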
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speecht5 import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechT5Config,
SpeechT5HifiGanConfig,
)
from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speecht5 import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechT5ForSpeechToSpeech,
SpeechT5ForSpeechToText,
SpeechT5ForTextToSpeech,
SpeechT5HifiGan,
SpeechT5Model,
SpeechT5PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
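# Hedged sketch of what the _LazyModule wiring above achieves; this is an
# assumed simplification, not the actual transformers._LazyModule. Attribute
# access imports the owning submodule on first use, so importing the package
# stays cheap even when torch-backed submodules are heavy.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, attr)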
| 16 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __A :
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : Union[str, Any] ,_snake_case : int=13 ,_snake_case : Tuple=7 ,_snake_case : List[Any]=True ,_snake_case : int=True ,_snake_case : Union[str, Any]=False ,_snake_case : List[Any]=True ,_snake_case : Any=99 ,_snake_case : str=32 ,_snake_case : int=5 ,_snake_case : int=4 ,_snake_case : int=37 ,_snake_case : Tuple="gelu" ,_snake_case : int=0.1 ,_snake_case : List[str]=0.1 ,_snake_case : Union[str, Any]=512 ,_snake_case : Optional[int]=16 ,_snake_case : int=2 ,_snake_case : List[Any]=0.02 ,_snake_case : Optional[int]=3 ,_snake_case : List[str]=4 ,_snake_case : Union[str, Any]=None ,) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = parent
lowercase__ : Dict = batch_size
lowercase__ : int = seq_length
lowercase__ : Optional[int] = is_training
lowercase__ : Optional[int] = use_input_mask
lowercase__ : str = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = vocab_size
lowercase__ : List[Any] = hidden_size
lowercase__ : List[str] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Any = hidden_act
lowercase__ : Optional[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Any = max_position_embeddings
lowercase__ : Dict = type_vocab_size
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : str = num_labels
lowercase__ : Optional[Any] = num_choices
lowercase__ : Union[str, Any] = scope
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : Optional[Any] = None
if self.use_input_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Union[str, Any] = None
if self.use_token_type_ids:
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowercase__ : Tuple = None
lowercase__ : List[str] = None
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase__ : str = ids_tensor([self.batch_size] ,self.num_choices )
lowercase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : List[str] ,_snake_case : Dict ,_snake_case : Optional[int] ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : List[Any] ,_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : Any = LlamaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case ,attention_mask=_snake_case )
lowercase__ : Optional[Any] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : Optional[int] ,) -> List[str]:
"""simple docstring"""
lowercase__ : int = True
lowercase__ : Optional[int] = LlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,)
lowercase__ : List[Any] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,)
lowercase__ : List[Any] = model(_snake_case ,attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Dict ,_snake_case : str ,_snake_case : Any ,_snake_case : str ,_snake_case : int ,_snake_case : Dict ,_snake_case : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : str ,_snake_case : Any ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = LlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Tuple ,_snake_case : int ,_snake_case : Optional[Any] ,_snake_case : List[str] ,_snake_case : str ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : Tuple ,_snake_case : str ,_snake_case : Any ,) -> Dict:
"""simple docstring"""
lowercase__ : Optional[Any] = True
lowercase__ : Dict = True
lowercase__ : Optional[int] = LlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowercase__ : Any = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,use_cache=_snake_case ,)
lowercase__ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids accordingly
lowercase__ : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase__ : Tuple = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append the new tokens to input_ids and extend the attention mask
lowercase__ : Union[str, Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase__ : Dict = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase__ : Optional[int] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
lowercase__ : str = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,past_key_values=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
# select random slice
lowercase__ : Optional[Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-3 ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase : int = (LlamaForCausalLM,) if is_torch_available() else ()
lowerCAmelCase : int = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase : List[str] = False
lowerCAmelCase : List[Any] = False
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : str = LlamaModelTester(self )
lowercase__ : List[Any] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ : List[str] = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = input_dict['''input_ids''']
lowercase__ : Tuple = input_ids.ne(1 ).to(_snake_case )
lowercase__ : List[str] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : List[Any] = LlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : int = '''single_label_classification'''
lowercase__ : List[str] = input_dict['''input_ids''']
lowercase__ : Any = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Tuple = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : Optional[Any] = LlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = 3
lowercase__ : List[Any] = '''multi_label_classification'''
lowercase__ : Any = input_dict['''input_ids''']
lowercase__ : Dict = input_ids.ne(1 ).to(_snake_case )
lowercase__ : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : Union[str, Any] = LlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : int = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ids_tensor([1, 10] ,config.vocab_size )
lowercase__ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : Optional[int] = LlamaModel(_snake_case )
original_model.to(_snake_case )
original_model.eval()
lowercase__ : Dict = original_model(_snake_case ).last_hidden_state
lowercase__ : str = original_model(_snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : List[str] = {'''type''': scaling_type, '''factor''': 10.0}
lowercase__ : Tuple = LlamaModel(_snake_case )
scaled_model.to(_snake_case )
scaled_model.eval()
lowercase__ : Union[str, Any] = scaled_model(_snake_case ).last_hidden_state
lowercase__ : Tuple = scaled_model(_snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) )
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__ : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' ,device_map='''auto''' )
lowercase__ : Optional[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowercase__ : Optional[int] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_snake_case ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__ : Union[str, Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' ,device_map='''auto''' )
lowercase__ : List[Any] = model(torch.tensor(_snake_case ) )
# Expected mean on dim = -1
lowercase__ : Tuple = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__ : Optional[Any] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_snake_case ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__ : Tuple = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''auto''' )
lowercase__ : str = model(torch.tensor(_snake_case ) )
# Expected mean on dim = -1
lowercase__ : List[Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__ : Optional[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test''' )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__ : Tuple = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' ,device_map='''auto''' )
lowercase__ : Optional[Any] = model(torch.tensor(_snake_case ) )
lowercase__ : Union[str, Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
lowercase__ : Optional[Any] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_snake_case ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('''Model is currently gated''' )
@slow
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowercase__ : str = '''Simply put, the theory of relativity states that '''
lowercase__ : List[Any] = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowercase__ : int = tokenizer.encode(_snake_case ,return_tensors='''pt''' )
lowercase__ : Any = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''sequential''' ,use_safetensors=_snake_case )
# greedy generation outputs
lowercase__ : List[str] = model.generate(_snake_case ,max_new_tokens=64 ,top_p=_snake_case ,temperature=1 ,do_sample=_snake_case )
lowercase__ : Any = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_snake_case )
self.assertEqual(_snake_case ,_snake_case )
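# Hedged, self-contained sketch of the "linear" RoPE scaling idea exercised by
# the parameterized test above: positions are divided by `factor` before the
# rotary angle table is built, so a longer context is squeezed into the
# trained position range. Simplified illustration, not the transformers code.
import torch

def rope_angles(seq_len: int, dim: int, factor: float = 1.0, base: float = 10000.0) -> torch.Tensor:
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float() / factor  # linear position scaling
    return torch.outer(positions, inv_freq)  # (seq_len, dim // 2)

unscaled = rope_angles(8, 16)
scaled = rope_angles(8, 16, factor=10.0)
assert not torch.allclose(unscaled, scaled)  # scaling changes even short-input angles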
| 16 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["torch", "torchsde"]
def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
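# Hedged sketch of the behaviour the dummy class above provides: importing the
# symbol always succeeds, but using it without the optional backends raises a
# clear error. `requires_backends` below is a simplified stand-in for the
# transformers helper, written as an assumption for illustration.
import importlib.util

def requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = getattr(obj, "__name__", type(obj).__name__)
        raise ImportError(f"{name} requires backends that are not installed: {missing}")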
| 16 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ) -> str:
if "model" in orig_key:
orig_key = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
orig_key = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
orig_key = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
orig_key = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
layer_num = orig_key.split('''.''' )[0].split('''_''' )[-1]
orig_key = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
orig_key = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
orig_key = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
orig_key = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
orig_key = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
orig_key = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
orig_key = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
orig_key = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
orig_key = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
orig_key = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
orig_key = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
orig_key = '''yoso.''' + orig_key
return orig_key
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ) -> dict:
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
orig_state_dict[rename_key(key )] = val
orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ) -> None:
orig_state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model_state_dict''']
config = YosoConfig.from_json_file(yoso_config_file )
model = YosoForMaskedLM(config )
new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
print(model.load_state_dict(new_state_dict ) )
model.eval()
model.save_pretrained(pytorch_dump_path )
print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
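# Hedged usage check for rename_key above; the sample key is an illustrative
# assumption rather than one taken from a real YOSO checkpoint.
assert (
    rename_key("model.transformer_0.mha.W_q.weight")
    == "yoso.encoder.layer.0.attention.self.query.weight"
)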
| 16 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class __A ( A_ ):
'''simple docstring'''
pass
def gen( shards: List[str] ) -> Dict:
for shard in shards:
for i in range(NUM_ITEMS_PER_SHARD ):
yield {"i": i, "shard": shard}
def main() -> None:
rank = int(os.environ['''RANK'''] )
world_size = int(os.environ['''WORLD_SIZE'''] )
parser = ArgumentParser()
parser.add_argument('''--streaming''' , type=bool )
parser.add_argument('''--local_rank''' , type=int )
parser.add_argument('''--num_workers''' , type=int , default=0 )
args = parser.parse_args()
streaming = args.streaming
num_workers = args.num_workers
gen_kwargs = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS )]}
ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
if not streaming:
ds = Dataset.from_list(list(ds ) )
ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
expected_local_size = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
local_size = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
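# Hedged sketch of the expected-size arithmetic checked in main() above: with
# `full_size` examples split across `world_size` ranks, the first
# full_size % world_size ranks receive one extra example.
def expected_size(full_size: int, world_size: int, rank: int) -> int:
    return full_size // world_size + int(rank < full_size % world_size)

assert [expected_size(12, 5, r) for r in range(5)] == [3, 3, 2, 2, 2]
assert sum(expected_size(12, 3, r) for r in range(3)) == 12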
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "tapas"
def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,**_snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[Any] = intermediate_size
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : Dict = type_vocab_sizes
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase__ : Any = positive_label_weight
lowercase__ : int = num_aggregation_labels
lowercase__ : List[str] = aggregation_loss_weight
lowercase__ : Optional[int] = use_answer_as_supervision
lowercase__ : Optional[Any] = answer_loss_importance
lowercase__ : Union[str, Any] = use_normalized_answer_loss
lowercase__ : str = huber_loss_delta
lowercase__ : str = temperature
lowercase__ : int = aggregation_temperature
lowercase__ : List[Any] = use_gumbel_for_cells
lowercase__ : Tuple = use_gumbel_for_aggregation
lowercase__ : Union[str, Any] = average_approximation_function
lowercase__ : Union[str, Any] = cell_selection_preference
lowercase__ : Any = answer_loss_cutoff
lowercase__ : List[Any] = max_num_rows
lowercase__ : str = max_num_columns
lowercase__ : int = average_logits_per_cell
lowercase__ : str = select_one_column
lowercase__ : str = allow_empty_column_selection
lowercase__ : Any = init_cell_selection_weights_to_zero
lowercase__ : Optional[int] = reset_position_index_per_cell
lowercase__ : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
lowercase__ : Optional[Any] = aggregation_labels
lowercase__ : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels ,dict ):
self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
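# Hedged mini-example of the key coercion above: a config reloaded from JSON
# carries string keys, which are mapped back to integer aggregation ids. The
# label names are illustrative of the WTQ setup, not read from a checkpoint.
aggregation_labels = {"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"}
normalized = {int(k): v for k, v in aggregation_labels.items()}
assert normalized[3] == "COUNT"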
| 16 | 1 |
"""simple docstring"""
from math import isclose, sqrt
def next_point( point_x: float , point_y: float , incoming_gradient: float ) -> tuple[float, float, float]:
normal_gradient = point_y / 4 / point_x
sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
ca = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
quadratic_term = outgoing_gradient**2 + 4
linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
x_minus = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
x_plus = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
next_x = x_minus if isclose(x_minus , point_x ) else x_plus
next_y = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def solution( first_x_coord: float = 1.4 , first_y_coord: float = -9.6 ) -> int:
num_reflections: int = 0
point_x: float = first_x_coord
point_y: float = first_y_coord
gradient: float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
point_x , point_y , gradient = next_point(point_x , point_y , gradient )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
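# Hedged sanity check for the geometry above: the first impact point
# (1.4, -9.6) lies on the ellipse 4x^2 + y^2 = 100, and the beam escapes
# through the small gap at the top where |x| <= 0.01 and y > 0.
assert abs(4 * 1.4**2 + (-9.6) ** 2 - 100) < 1e-9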
| 16 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
reshaped_hidden_states = (
reshaped_hidden_states[0].view(batch_size ,num_channels ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
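# Hedged sketch of the shape arithmetic asserted in create_and_check_model
# above: patch embedding yields (image_size // patch_size) ** 2 tokens and
# each of the len(depths) - 1 patch-merging stages reduces the count by 4.
def final_seq_len(image_size: int, patch_size: int, num_stages: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches // (4 ** (num_stages - 1))

# tester defaults above: image_size=32, patch_size=2, depths=[1, 2, 1]
assert final_seq_len(32, 2, 3) == 16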
| 16 | 1 |
"""simple docstring"""
def multiplication_table( number: int , number_of_terms: int ) -> str:
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 16 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict:
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
    def _compute( self ,predictions ,references ,alpha=0.9 ,beta=3 ,gamma=0.5 ):
        """simple docstring"""
        if NLTK_VERSION >= version.Version('''3.6.5''' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) ,word_tokenize(pred ) ,alpha=alpha ,beta=beta ,gamma=gamma )
                for ref, pred in zip(references ,predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref ,pred ,alpha=alpha ,beta=beta ,gamma=gamma )
                for ref, pred in zip(references ,predictions )
            ]
        return {"meteor": np.mean(scores )}
| 16 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_cpmant'] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
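# A minimal, self-contained sketch of the deferred-import idea behind _LazyModule
# (illustrative only; the real implementation lives in transformers.utils): the
# submodule is imported the first time an attribute is actually requested.
def _lazy_attr_sketch(module_name, attr):
    """Import `module_name` and resolve `attr` only when actually requested."""
    import importlib
    module = importlib.import_module(module_name)
    return getattr(module, attr)
# e.g. _lazy_attr_sketch("json", "dumps") pays the cost of importing `json` only at
# call time, which is what keeps importing this package cheap when torch is unused.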
| 16 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/xglm-564M': 2_048,
}
class __A ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self ,vocab_file ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,sp_model_kwargs : Optional[Dict[str, Any]] = None ,**kwargs ,) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''' ,[] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,cls_token=cls_token ,pad_token=pad_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        sp_size = len(self.sp_model )
        madeup_words_ids = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self ,_snake_case ):
        """simple docstring"""
        self.__dict__ = _snake_case
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask( self ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ,already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences( self ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
@property
    def vocab_size( self ) -> int:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ):
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize( self ,_snake_case : str ) -> List[str]:
"""simple docstring"""
        return self.sp_model.encode(_snake_case ,out_type=str )
    def _convert_token_to_id( self ,_snake_case ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self ,_snake_case ):
        """simple docstring"""
        if _snake_case in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[_snake_case]
        return self.sp_model.IdToPiece(_snake_case - self.fairseq_offset )
    def convert_tokens_to_string( self ,_snake_case ):
        """simple docstring"""
        out_string = ''''''.join(_snake_case ).replace(SPIECE_UNDERLINE ,''' ''' ).strip()
        return out_string
    def save_vocabulary( self ,save_directory : str ,filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
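# A minimal usage sketch ("sentencepiece.bpe.model" is an illustrative path to a
# local SentencePiece model): it exercises the fairseq-offset id mapping above,
# where spm ids are shifted by one and the first four ids are reserved.
if __name__ == "__main__":
    tokenizer = __A("sentencepiece.bpe.model" )
    ids = tokenizer.convert_tokens_to_ids(["<s>", "<pad>", "</s>", "<unk>"] )
    print(ids )  # [0, 1, 2, 3] by construction of fairseq_tokens_to_ids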
| 16 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
'''simple docstring'''
    def __init__( self ,parent ,batch_size=3 ,image_size=32 ,num_channels=3 ,embeddings_size=10 ,hidden_sizes=[8, 16, 32, 64] ,depths=[1, 1, 2, 1] ,is_training=True ,use_labels=True ,hidden_act="relu" ,num_labels=3 ,scope=None ,out_features=["stage2", "stage3", "stage4"] ,out_indices=[2, 3, 4] ,num_groups=1 ,):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        """simple docstring"""
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_backbone( self ,config ,pixel_values ,labels ):
        """simple docstring"""
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,1 )
        self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __A ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=BitConfig ,has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
    def test_attention_outputs( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
"""simple docstring"""
pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module ,(nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
                    self.assertTrue(
                        torch.all(module.bias == 0 ) ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) ,expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict ,config ,model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict ,config ,model_class )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
    def test_feed_forward_chunking( self ):
"""simple docstring"""
pass
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
@require_torch
class __A ( BackboneTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
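# A minimal standalone sketch of what the backbone checks above verify (assumes a
# working torch install; the input size is illustrative):
if __name__ == "__main__":
    config = BitConfig(out_features=["stage2", "stage3", "stage4"] )
    backbone = BitBackbone(config=config )
    backbone.eval()
    with torch.no_grad():
        outputs = backbone(floats_tensor([1, 3, 32, 32] ) )
    for feature_map in outputs.feature_maps:
        print(tuple(feature_map.shape ) )  # one (batch, channels, height, width) per out_feature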
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
def convert_weight_and_push(hidden_sizes , name , config , save_directory , push_to_hub = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''' , pretrained=True )
            else:
                from_model = timm.create_model('''levit_128''' , pretrained=True )
        if hidden_sizes == 1_92:
            from_model = timm.create_model('''levit_192''' , pretrained=True )
        if hidden_sizes == 2_56:
            from_model = timm.create_model('''levit_256''' , pretrained=True )
        if hidden_sizes == 3_84:
            from_model = timm.create_model('''levit_384''' , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 2_24, 2_24) )
        from_logits = from_model(x )
        our_logits = our_model(x ).logits
        assert torch.allclose(from_logits , our_logits ), "The model logits don't match the original one."
        checkpoint_name = name
        print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push(save_directory , model_name = None , push_to_hub = True ):
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
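# Example invocation (illustrative; the script file name is assumed):
#   python convert_levit_checkpoint.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub
# Omitting --model_name converts every entry in names_to_config in one run.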
| 16 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __A ( PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase : str = "retribert"
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=8 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,share_encoders=True ,projection_dim=128 ,pad_token_id=0 ,**kwargs ,):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
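# A minimal usage sketch: instantiating the config above with defaults and reading
# a couple of the fields it stores.
if __name__ == "__main__":
    config = __A()
    print(config.model_type ,config.hidden_size ,config.projection_dim )  # retribert 768 128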
| 16 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
'''simple docstring'''
    languages : List[str]
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="Translation" ,init=False ,repr=False )
def __call__( self : List[str] ) -> Any:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __A :
'''simple docstring'''
    languages : Optional[List] = None
    num_languages : Optional[int] = None
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="TranslationVariableLanguages" ,init=False ,repr=False )
    def __post_init__( self ):
        """simple docstring"""
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example( self ,translation_dict ):
"""simple docstring"""
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f"""Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text ,str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
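# A minimal sketch using the public counterpart of the second feature above
# (datasets.features.TranslationVariableLanguages): one-to-many translations are
# flattened into (language, text) pairs and re-sorted by language code.
if __name__ == "__main__":
    from datasets.features import TranslationVariableLanguages

    feature = TranslationVariableLanguages(languages=["en", "fr", "de"] )
    print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"} ) )
    # {'language': ['de', 'en', 'fr', 'fr'], 'translation': ['die katze', 'the cat', 'le chat', 'la chatte']}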
| 16 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( ProcessorMixin ):
'''simple docstring'''
lowerCAmelCase : int = ["image_processor", "tokenizer"]
lowerCAmelCase : Optional[Any] = "FlavaImageProcessor"
lowerCAmelCase : Dict = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self ,image_processor=None ,tokenizer=None ,**kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' ,FutureWarning ,)
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor ,tokenizer )
        self.current_processor = self.image_processor
    def __call__( self ,images: Optional[ImageInput] = None ,text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None ,add_special_tokens: bool = True ,padding: Union[bool, str, PaddingStrategy] = False ,truncation: Union[bool, str, TruncationStrategy] = False ,max_length: Optional[int] = None ,stride: int = 0 ,pad_to_multiple_of: Optional[int] = None ,return_image_mask: Optional[bool] = None ,return_codebook_pixels: Optional[bool] = None ,return_token_type_ids: Optional[bool] = None ,return_attention_mask: Optional[bool] = None ,return_overflowing_tokens: bool = False ,return_special_tokens_mask: bool = False ,return_offsets_mapping: bool = False ,return_length: bool = False ,verbose: bool = True ,return_tensors: Optional[Union[str, TensorType]] = None ,**kwargs ,):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(
                text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_token_type_ids=return_token_type_ids ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
        if images is not None:
            image_features = self.image_processor(
                images ,return_image_mask=return_image_mask ,return_codebook_pixels=return_codebook_pixels ,return_tensors=return_tensors ,**kwargs ,)
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) ,tensor_type=return_tensors )
    def batch_decode( self ,*args ,**kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode( self ,*args ,**kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args ,**kwargs )
@property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,FutureWarning ,)
        return self.image_processor_class
@property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,FutureWarning ,)
        return self.image_processor
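# A minimal usage sketch (the checkpoint id "facebook/flava-full" is illustrative):
#   from transformers import FlavaProcessor
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")
# Text-only or image-only calls are also valid; passing neither raises a ValueError.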
| 16 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
    def inner_training_loop(batch_size ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""" , eval_metric )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
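# A minimal standalone sketch of the decorator's retry behavior (illustrative; the
# helper name below is not part of the original script): the wrapped function is
# re-run with a halved batch size whenever it raises a CUDA out-of-memory style error.
def _find_executable_batch_size_sketch():
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        if batch_size > 16:  # pretend anything above 16 exhausts memory
            raise RuntimeError("CUDA out of memory.")
        return batch_size
    return train()  # -> 16 after retrying 128 -> 64 -> 32 -> 16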
| 16 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __A ( unittest.TestCase ):
'''simple docstring'''
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs" )
@classmethod
    def setUpClass( cls ):
"""simple docstring"""
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
    def tearDownClass( cls ):
"""simple docstring"""
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
    def test_no_config( self ):
        """simple docstring"""
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy() )
    def test_config_compatibility( self ):
        """simple docstring"""
        for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ['''--config_file''', str(config ), self.test_file_path] ,env=os.environ.copy() )
    def test_accelerate_test( self ):
"""simple docstring"""
execute_subprocess_async(['''accelerate''', '''test'''] ,env=os.environ.copy() )
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : str = "test-tpu"
lowerCAmelCase : Dict = "us-central1-a"
lowerCAmelCase : List[str] = "ls"
lowerCAmelCase : Any = ["accelerate", "tpu-config"]
lowerCAmelCase : Any = "cd /usr/share"
lowerCAmelCase : Union[str, Any] = "tests/test_samples/test_command_file.sh"
lowerCAmelCase : List[str] = "Running gcloud compute tpus tpu-vm ssh"
    def test_base( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,output ,)
    def test_base_backward_compatibility( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command''',
                self.command,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,output ,)
    def test_with_config_file( self ):
        """simple docstring"""
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] ,return_stdout=True )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
    def test_with_config_file_and_command( self ):
        """simple docstring"""
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,output ,)
    def test_with_config_file_and_multiple_command( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--command''',
                self.command,
                '''--command''',
                '''echo "Hello World"''',
                '''--debug''',
            ] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" ,output ,)
    def test_with_config_file_and_command_file( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
    def test_with_config_file_and_command_file_backward_compatibility( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command_file''',
                self.command_file,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
    def test_accelerate_install( self ):
        """simple docstring"""
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
    def test_accelerate_install_version( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--install_accelerate''',
                '''--accelerate_version''',
                '''12.0.0''',
                '''--debug''',
            ] ,return_stdout=True ,)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,output ,)
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx ):
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx , cnt ):
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple:
lowercase__ : List[str] = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def __UpperCAmelCase ( ) -> Optional[int]:
lowercase__ : List[str] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
lowercase__ : List[Any] = '''imagenet-1k-id2label.json'''
lowercase__ : Optional[Any] = 10_00
lowercase__ : Optional[Any] = '''huggingface/label-files'''
lowercase__ : Dict = num_labels
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
lowercase__ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : str = {v: k for k, v in idalabel.items()}
lowercase__ : Any = CvtConfig(num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
lowercase__ : int = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
lowercase__ : int = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (24 = 2 + 2 + 20)
else:
lowercase__ : List[Any] = [2, 2, 20]
lowercase__ : Any = [3, 12, 16]
lowercase__ : Tuple = [1_92, 7_68, 10_24]
lowercase__ : List[Any] = CvtForImageClassification(__lowerCamelCase )
lowercase__ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
lowercase__ : List[str] = image_size
lowercase__ : Union[str, Any] = torch.load(__lowerCamelCase , map_location=torch.device('''cpu''' ) )
lowercase__ : int = OrderedDict()
lowercase__ : List[Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Any = list_of_state_dict + cls_token(__lowerCamelCase )
lowercase__ : Any = list_of_state_dict + embeddings(__lowerCamelCase )
for cnt in range(config.depth[idx] ):
lowercase__ : Tuple = list_of_state_dict + attention(__lowerCamelCase , __lowerCamelCase )
lowercase__ : List[Any] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowerCamelCase )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : Optional[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
image_processor.save_pretrained(__lowerCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the cvt checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
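# Hedged usage sketch (the script filename is an assumption; the flags match the
# parser above):
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24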
| 16 | 1 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase_ = get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ) -> List[Any]:
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with FSDP.state_dict_type(
__lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase__ : Optional[int] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase__ : Optional[Any] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
lowercase__ : Tuple = os.path.join(__lowerCamelCase , __lowerCamelCase )
if accelerator.process_index == 0:
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(__lowerCamelCase , __lowerCamelCase )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase__ : Optional[int] = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase__ : Any = os.path.join(__lowerCamelCase , __lowerCamelCase )
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(__lowerCamelCase , __lowerCamelCase )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase__ : Union[str, Any] = os.path.join(__lowerCamelCase , f"""{MODEL_NAME}_{model_index}""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
logger.info(f"""Saving model to {ckpt_dir}""" )
lowercase__ : Any = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=__lowerCamelCase , storage_writer=dist_cp.FileSystemWriter(__lowerCamelCase ) , planner=DefaultSavePlanner() , )
logger.info(f"""Model saved to {ckpt_dir}""" )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ) -> Tuple:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__lowerCamelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
lowercase__ : Optional[Any] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
lowercase__ : Any = os.path.join(__lowerCamelCase , __lowerCamelCase )
logger.info(f"""Loading model from {input_model_file}""" )
lowercase__ : List[Any] = torch.load(__lowerCamelCase )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase__ : List[Any] = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase__ : Dict = os.path.join(__lowerCamelCase , __lowerCamelCase )
logger.info(f"""Loading model from {input_model_file}""" )
lowercase__ : List[str] = torch.load(__lowerCamelCase )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase__ : str = (
os.path.join(__lowerCamelCase , f"""{MODEL_NAME}_{model_index}""" )
if f"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading model from {ckpt_dir}""" )
lowercase__ : Dict = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__lowerCamelCase , storage_reader=dist_cp.FileSystemReader(__lowerCamelCase ) , planner=DefaultLoadPlanner() , )
lowercase__ : Optional[int] = state_dict['''model''']
logger.info(f"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ) -> int:
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with FSDP.state_dict_type(
__lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase__ : Tuple = FSDP.optim_state_dict(__lowerCamelCase , __lowerCamelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase__ : List[Any] = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase__ : Dict = os.path.join(__lowerCamelCase , __lowerCamelCase )
logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(__lowerCamelCase , __lowerCamelCase )
logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
else:
lowercase__ : Any = os.path.join(__lowerCamelCase , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(__lowerCamelCase ) , planner=DefaultSavePlanner() , )
logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ) -> Any:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase__ : Optional[Any] = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase__ : Optional[int] = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase__ : Any = os.path.join(__lowerCamelCase , __lowerCamelCase )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
lowercase__ : Tuple = torch.load(__lowerCamelCase )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
lowercase__ : Any = (
os.path.join(__lowerCamelCase , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
lowercase__ : List[str] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(__lowerCamelCase ) , )
lowercase__ : int = optim_state['''optimizer''']
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
lowercase__ : List[Any] = FSDP.optim_state_dict_to_load(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
optimizer.load_state_dict(__lowerCamelCase )
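# Hedged note: in `accelerate`, save/load helpers shaped like the ones above are
# normally reached through `Accelerator.save_state(...)` / `Accelerator.load_state(...)`
# rather than called directly; the FSDP plugin's `state_dict_type`
# (FULL / LOCAL / SHARDED) selects which branch runs. This is an inference from
# the surrounding code, not a statement of the exact public call chain.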
| 16 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowercase__ : Tuple = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
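# Hedged example (assuming the two arguments are (number, iterations), as the
# error messages and loop above suggest): starting at 1 for 15 iterations yields
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (a space is appended after every entry, so the string ends with one).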
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
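# Hedged note on the lazy-import pattern above: importing this package is cheap
# because _LazyModule only records _import_structure; the heavy torch/tf/flax
# imports run on first attribute access. A minimal sketch (module path assumed):
#   import transformers.models.electra as electra  # no framework import yet
#   cls = electra.ElectraModel                      # backend imported here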
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : Tuple = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Optional[Any] = len(_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
| 16 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : str ,*_snake_case : List[Any] ,**_snake_case : str ) -> None:
"""simple docstring"""
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
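# Hedged note: this class is only a backwards-compatibility shim; new code should
# construct the image processor directly, e.g.:
#   from transformers import LayoutLMv2ImageProcessor
#   image_processor = LayoutLMv2ImageProcessor()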
| 16 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
if "model" in orig_key:
lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
lowercase__ : str = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
lowercase__ : Optional[Any] = '''yoso.''' + orig_key
return orig_key
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowercase__ : Tuple = val
lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict''']
lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase )
lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase )
lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase )
print(model.load_state_dict(__lowerCamelCase ) )
model.eval()
model.save_pretrained(__lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
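# Hedged usage sketch (the script filename is an assumption; the flags match the
# parser above):
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path yoso.ckpt \
#       --config_file yoso_config.json \
#       --pytorch_dump_path ./yoso-converted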
| 16 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["image_processor", "tokenizer"]
lowerCAmelCase : str = "CLIPImageProcessor"
lowerCAmelCase : str = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self : int ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=None ,**_snake_case : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : int = kwargs.pop('''feature_extractor''' )
lowercase__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
def __call__( self : Any ,_snake_case : List[str]=None ,_snake_case : str=None ,_snake_case : Optional[Any]=None ,**_snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase__ : Optional[int] = self.tokenizer(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if images is not None:
lowercase__ : Tuple = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : List[Any] ,*_snake_case : List[Any] ,**_snake_case : Dict ) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Any ,*_snake_case : str ,**_snake_case : str ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.tokenizer.model_input_names
lowercase__ : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
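# Hedged usage sketch: the processor pairs a CLIP image processor with an XLM-R
# tokenizer, so a joint call conceptually looks like (argument names assumed from
# the __call__ body above):
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> tokenizer outputs plus `pixel_values` from the image processor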
| 16 |
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
    with open(os.path.dirname(__file__ ) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
lowercase__ : List[str] = 0
return total_score
if __name__ == "__main__":
print(solution())
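# Worked example from Project Euler problem 22: COLIN scores
# 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name alphabetically, so it
# contributes 938 * 53 = 49714 to the total.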
| 16 | 1 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ) -> Tuple:
# Recurse if needed
if "." in tensor_name:
lowercase__ : List[str] = tensor_name.split('''.''' )
for split in splits[:-1]:
lowercase__ : List[Any] = getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
lowercase__ : Optional[int] = new_module
lowercase__ : Tuple = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
lowercase__ : List[Any] = tensor_name in module._buffers
lowercase__ : Any = getattr(__lowerCamelCase , __lowerCamelCase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
lowercase__ : Any = False
lowercase__ : List[str] = False
if is_buffer or not is_bitsandbytes_available():
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
else:
lowercase__ : Any = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowercase__ : int = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
lowercase__ : Tuple = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase__ : List[Any] = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
lowercase__ : str = value.to('''cpu''' )
if value.dtype == torch.inta:
lowercase__ : str = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
lowercase__ : Tuple = torch.tensor(__lowerCamelCase , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __lowerCamelCase ) and fpaa_statistics is None:
lowercase__ : str = new_value.T
lowercase__ : str = old_value.__dict__
if is_abit:
lowercase__ : Union[str, Any] = bnb.nn.IntaParams(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
elif is_abit:
lowercase__ : Optional[int] = bnb.nn.Paramsabit(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
lowercase__ : Tuple = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(__lowerCamelCase ) )
else:
if value is None:
lowercase__ : Union[str, Any] = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
lowercase__ : List[Any] = value.to(__lowerCamelCase )
else:
lowercase__ : Optional[Any] = torch.tensor(__lowerCamelCase , device=__lowerCamelCase )
if is_buffer:
lowercase__ : Tuple = new_value
else:
lowercase__ : List[Any] = nn.Parameter(__lowerCamelCase , requires_grad=old_value.requires_grad )
lowercase__ : int = new_value
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False ) -> List[str]:
for name, module in model.named_children():
if current_key_name is None:
lowercase__ : Tuple = []
current_key_name.append(__lowerCamelCase )
if (isinstance(__lowerCamelCase , nn.Linear ) or isinstance(__lowerCamelCase , __lowerCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(__lowerCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase__ , lowercase__ : str = module.weight.shape
else:
lowercase__ : str = module.in_features
lowercase__ : Union[str, Any] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowercase__ : Union[str, Any] = bnb.nn.LinearabitLt(
__lowerCamelCase , __lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowercase__ : List[Any] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowercase__ : Tuple = bnb.nn.Linearabit(
__lowerCamelCase , __lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowercase__ : Optional[int] = True
# Store the module class in case we need to transpose the weight later
lowercase__ : str = type(__lowerCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowerCamelCase )
if len(list(module.children() ) ) > 0:
lowercase__ , lowercase__ : Any = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_been_replaced=__lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None ) -> Optional[Any]:
lowercase__ : Dict = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
lowercase__ , lowercase__ : str = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __UpperCAmelCase ( *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , __lowerCamelCase , )
return replace_with_bnb_linear(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( *__lowerCamelCase , **__lowerCamelCase ) -> str:
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , __lowerCamelCase , )
return set_module_quantized_tensor_to_device(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
lowercase__ : List[str] = deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowercase__ : Union[str, Any] = find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase__ : Optional[int] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase__ : Union[str, Any] = sum(__lowerCamelCase , [] )
lowercase__ : Dict = len(__lowerCamelCase ) > 0
# Check if it is a base model
lowercase__ : Optional[Any] = not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase__ : Optional[int] = list(model.named_children() )
lowercase__ : int = [list_modules[-1][0]]
# add last module together with tied weights
lowercase__ : int = set(__lowerCamelCase ) - set(__lowerCamelCase )
lowercase__ : Dict = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
lowercase__ : Dict = ['''.weight''', '''.bias''']
lowercase__ : Dict = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase__ : List[str] = name.replace(__lowerCamelCase , '''''' )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
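# Hedged usage note: in practice helpers of this shape are driven by
# `from_pretrained(..., load_in_8bit=True)` or a BitsAndBytesConfig, which swap
# eligible nn.Linear layers for bitsandbytes variants while keeping
# `modules_to_not_convert` (the LM head by default, per the code above) in full
# precision. This describes the pattern, not a guaranteed call chain.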
| 16 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = {}
if "candidate_labels" in kwargs:
lowercase__ : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowercase__ : Optional[Any] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = load_image(_snake_case )
lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework )
lowercase__ : str = candidate_labels
        lowercase__ : Dict = [hypothesis_template.format(x ) for x in candidate_labels]
lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case )
lowercase__ : Optional[int] = [text_inputs]
return inputs
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' )
lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] ,_snake_case ):
lowercase__ : List[str] = text_inputs[0]
else:
# Batching case.
lowercase__ : int = text_inputs[0][0]
lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case )
lowercase__ : Union[str, Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Dict = model_outputs.pop('''candidate_labels''' )
lowercase__ : Optional[Any] = model_outputs['''logits'''][0]
if self.framework == "pt":
lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__ : Tuple = probs.tolist()
if not isinstance(_snake_case ,_snake_case ):
lowercase__ : Any = [scores]
elif self.framework == "tf":
lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 )
lowercase__ : Optional[Any] = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
lowercase__ : Union[str, Any] = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda x : -x[0] )
]
return result
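# Hedged usage sketch (task string and checkpoint are illustrative examples, not
# taken from this file):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"],
#              hypothesis_template="This is a photo of {}.")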
| 16 | 1 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
# ===== initialization =====
lowercase__ : List[str] = Mock()
lowercase__ : Any = conn, Mock()
lowercase__ : Optional[int] = iter([1, None] )
lowercase__ : Union[str, Any] = lambda __lowerCamelCase : next(__lowerCamelCase )
# ===== invoke =====
send_file(filename='''mytext.txt''' , testing=__lowerCamelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 16 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
lowercase__ : List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCamelCase ):
# looping through rows of graph array
for i in range(__lowerCamelCase ):
# looping through columns of graph array
for j in range(__lowerCamelCase ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowercase__ : str = dist[i][k] + dist[k][j]
_print_dist(__lowerCamelCase , __lowerCamelCase )
return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('Enter number of vertices: '))
lowerCAmelCase_ = int(input('Enter number of edges: '))
lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase_ = int(input('Enter source:'))
lowerCAmelCase_ = int(input('Enter destination:'))
lowerCAmelCase_ = float(input('Enter weight:'))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
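# Note: the triple loop gives Floyd-Warshall its O(v^3) time and O(v^2) space.
# In the sample run above, vertex 0 has no edges, which is why its row and
# column stay INF while the 1<->2 pair settles to weights 2 and 1.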
| 16 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : int = XLMTokenizer
lowerCAmelCase : Tuple = False
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase__ : Tuple = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
lowercase__ : Any = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowercase__ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ) as fp:
fp.write(json.dumps(_snake_case ) )
with open(self.merges_file ,'''w''' ) as fp:
fp.write('''\n'''.join(_snake_case ) )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = '''lower newer'''
lowercase__ : Optional[int] = '''lower newer'''
return input_text, output_text
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
lowercase__ : str = XLMTokenizer(self.vocab_file ,self.merges_file )
lowercase__ : Tuple = '''lower'''
lowercase__ : List[Any] = ['''low''', '''er</w>''']
lowercase__ : str = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
lowercase__ : Optional[int] = tokens + ['''<unk>''']
lowercase__ : Optional[int] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) ,_snake_case )
@slow
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[int] = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
lowercase__ : str = tokenizer.encode('''sequence builders''' ,add_special_tokens=_snake_case )
lowercase__ : List[Any] = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=_snake_case )
lowercase__ : Any = tokenizer.build_inputs_with_special_tokens(_snake_case )
lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 16 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None:
"""simple docstring"""
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
| 16 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase_ = False
class __A ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowercase__ : int = torch.manual_seed(0 )
lowercase__ : Any = pipe(
image=_snake_case ,generator=_snake_case ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ,).images
lowercase__ : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : List[Any] = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 16 | 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def __UpperCAmelCase ( ) -> Tuple:
plt.scatter(__lowerCamelCase , __lowerCamelCase , color='''red''' )
plt.plot(__lowerCamelCase , pol_reg.predict(poly_reg.fit_transform(__lowerCamelCase ) ) , color='''blue''' )
    plt.title('''Truth or Bluff (Polynomial Regression)''' )
plt.xlabel('''Position level''' )
plt.ylabel('''Salary''' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
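# Note: PolynomialFeatures(degree=4) expands a scalar x into [1, x, x^2, x^3, x^4],
# so fitting LinearRegression on that expansion is exactly a degree-4 polynomial
# fit; any new input (e.g. [[5.5]] above) must pass through the same transform
# before prediction.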
| 16 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
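# Each test below round-trips a checkpoint between frameworks: it loads a PyTorch
# checkpoint through a TF auto-class (from_pt) and vice versa (from_tf), then
# checks that the expected concrete model class comes back.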
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_model_from_pretrained ( self : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )
            model = TFAutoModel.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertModel )
            model = AutoModel.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertModel )
@slow
    def test_model_for_pretraining_from_pretrained ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )
            model = TFAutoModelForPreTraining.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForPreTraining )
            model = AutoModelForPreTraining.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForPreTraining )
@slow
    def test_model_for_causal_lm ( self : Tuple ) -> Dict:
        """simple docstring"""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,GPTaConfig )
            model = TFAutoModelForCausalLM.from_pretrained(model_name ,from_pt=True )
            model , loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name ,output_loading_info=True ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFGPTaLMHeadModel )
            model = AutoModelForCausalLM.from_pretrained(model_name ,from_tf=True )
            model , loading_info = AutoModelForCausalLM.from_pretrained(
                model_name ,output_loading_info=True ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,GPTaLMHeadModel )
@slow
    def test_lmhead_model_from_pretrained ( self : Any ) -> Tuple:
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForMaskedLM )
            model = AutoModelWithLMHead.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForMaskedLM )
@slow
    def test_model_for_masked_lm ( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name ,from_pt=True )
            model , loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name ,output_loading_info=True ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForMaskedLM )
            model = AutoModelForMaskedLM.from_pretrained(model_name ,from_tf=True )
            model , loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name ,output_loading_info=True ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForMaskedLM )
@slow
    def test_model_for_encoder_decoder_lm ( self : List[Any] ) -> Dict:
        """simple docstring"""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,TaConfig )
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name ,from_pt=True )
            model , loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name ,output_loading_info=True ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFTaForConditionalGeneration )
            model = AutoModelForSeqaSeqLM.from_pretrained(model_name ,from_tf=True )
            model , loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name ,output_loading_info=True ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TaForConditionalGeneration )
@slow
    def test_sequence_classification_model_from_pretrained ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForSequenceClassification )
            model = AutoModelForSequenceClassification.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForSequenceClassification )
@slow
    def test_question_answering_model_from_pretrained ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config ,BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name ,from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,TFBertForQuestionAnswering )
            model = AutoModelForQuestionAnswering.from_pretrained(model_name ,from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model ,BertForQuestionAnswering )
    def test_from_pretrained_identifier ( self : Dict ) -> Any:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,BertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
    def test_from_identifier_from_model_type ( self : int ) -> List[Any]:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
| 16 | 1 |
"""simple docstring"""
def nand_gate ( input_a : int , input_b : int ) -> int:
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate ( ) -> None:
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 16 |
"""simple docstring"""
def solution ( length : int = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
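    # one DP column per tile length: index tile_length - 2 maps lengths 2, 3 and 4 to columns 0, 1 and 2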
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 1 |
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func ( func ,param_num ,param ) -> str:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A ( TestCasePlus ):
'''simple docstring'''
    @parameterized.expand(params ,name_func=custom_name_func )
    def test_fp32_non_distributed ( self : str ,stage : Tuple ,model : str ) -> Union[str, Any]:
        """simple docstring"""
        self.run_and_check(
            stage=stage ,model=model ,distributed=False ,fp16=False ,)
    @require_torch_multi_gpu
    @parameterized.expand(params ,name_func=custom_name_func )
    def test_fp32_distributed ( self : int ,stage : Optional[int] ,model : Dict ) -> List[str]:
        """simple docstring"""
        self.run_and_check(
            stage=stage ,model=model ,distributed=True ,fp16=False ,)
    @parameterized.expand(params ,name_func=custom_name_func )
    def test_fp16_non_distributed ( self : int ,stage : str ,model : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        self.run_and_check(
            stage=stage ,model=model ,distributed=False ,fp16=True ,)
    @require_torch_multi_gpu
    @parameterized.expand(params ,name_func=custom_name_func )
    def test_fp16_distributed ( self : List[str] ,stage : str ,model : str ) -> Optional[Any]:
        """simple docstring"""
        self.run_and_check(
            stage=stage ,model=model ,distributed=True ,fp16=True ,)
    def do_checks ( self : List[Any] ,output_dir : Any ) -> Any:
"""simple docstring"""
pass
    def run_and_check ( self : Tuple ,stage : str ,model : str ,eval_steps : int = 10 ,distributed : bool = True ,fp16 : bool = True ,quality_checks : bool = True ,) -> Any:
        """simple docstring"""
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage ,model_name=model_name ,eval_steps=eval_steps ,num_train_epochs=1 ,distributed=distributed ,fp16=fp16 ,)
        self.do_checks(output_dir )
        return output_dir
    def run_trainer ( self : Any ,stage : str ,model_name : str ,eval_steps : int = 10 ,num_train_epochs : int = 1 ,distributed : bool = True ,fp16 : bool = True ,) -> List[str]:
        """simple docstring"""
        output_dir = self.get_auto_remove_tmp_dir('''./xxx''' ,after=False )
        args = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
        --num_train_epochs {str(num_train_epochs )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
        if fp16:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
lowercase__ : Any = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
lowercase__ : Optional[int] = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
lowercase__ : int = self.get_launcher(_snake_case )
lowercase__ : List[str] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd ,env=self.get_env() )
return output_dir
    def get_launcher ( self : Optional[Any] ,distributed : bool=False ) -> Any:
        """simple docstring"""
        num_gpus = min(2 ,get_gpu_count() ) if distributed else 1
return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 16 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
    def test_cpu ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
    def test_ops ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 16 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
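# These end-to-end tests replicate the pipeline params and shard the inputs across
# all available devices, then run the pipeline jitted (jit=True), so the returned
# image batch carries a leading per-device dimension.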
@slow
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
    def tearDown ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
    def test_canny ( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''' ,from_pt=True ,dtype=jnp.bfloataa )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' ,controlnet=controlnet ,from_pt=True ,dtype=jnp.bfloataa )
        params['''controlnet'''] = controlnet_params
        prompts = '''bird'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng ,jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids ,image=processed_image ,params=p_params ,prng_seed=rng ,num_inference_steps=50 ,jit=True ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def test_pose ( self : List[Any] ) -> str:
        """simple docstring"""
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''' ,from_pt=True ,dtype=jnp.bfloataa )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' ,controlnet=controlnet ,from_pt=True ,dtype=jnp.bfloataa )
        params['''controlnet'''] = controlnet_params
        prompts = '''Chef in the kitchen'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng ,jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids ,image=processed_image ,params=p_params ,prng_seed=rng ,num_inference_steps=50 ,jit=True ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
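# optional pieces are registered below only if their backend (sentencepiece, torch) is installed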
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 | 1 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
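# force deterministic torch kernels so the hard-coded expected slices below stay stable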
class __A ( ModelTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
    def dummy_input ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input ( self : str ,seed : str=0 ) -> Optional[Any]:
        """simple docstring"""
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
    def input_shape ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return (4, 8)
@property
    def output_shape ( self : Any ) -> List[str]:
"""simple docstring"""
return (4, 8)
    def prepare_init_args_and_inputs_for_common ( self : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        init_dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub ( self : int ) -> int:
        """simple docstring"""
        model , loading_info = PriorTransformer.from_pretrained(
            '''hf-internal-testing/prior-dummy''' ,output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature ( self : Optional[int] ) -> Dict:
        """simple docstring"""
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['''hidden_states''', '''timestep''']
        self.assertListEqual(arg_names[:2] ,expected_arg_names )
    def test_output_pretrained ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        model = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
        model = model.to(torch_device )
        if hasattr(model ,'''set_default_attn_processor''' ):
            model.set_default_attn_processor()
        seed_input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**seed_input )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
        self.assertTrue(torch_all_close(output_slice ,expected_output_slice ,rtol=1e-2 ) )
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
    def get_dummy_seed_input ( self : Dict ,batch_size : List[str]=1 ,embedding_dim : Tuple=768 ,num_embeddings : List[Any]=77 ,seed : Optional[Any]=0 ) -> List[Any]:
        """simple docstring"""
        torch.manual_seed(seed )
        batch_size = batch_size
        embedding_dim = embedding_dim
        num_embeddings = num_embeddings
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown ( self : int ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
    def test_kandinsky_prior ( self : Optional[Any] ,seed : Optional[Any] ,expected_slice : Optional[Any] ) -> int:
        """simple docstring"""
        model = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' ,subfolder='''prior''' )
        model.to(torch_device )
        seed_input = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**seed_input )[0]
        assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice ,expected_output_slice ,atol=1e-3 )
| 16 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["torch", "torchsde"]
    def __init__( self : Tuple ,*args : Union[str, Any] ,**kwargs : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''torch''', '''torchsde'''] )
@classmethod
    def from_config ( cls : List[str] ,*args : int ,**kwargs : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
@classmethod
    def from_pretrained ( cls : List[Any] ,*args : List[Any] ,**kwargs : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
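# vision-, torch- and TF-specific classes are appended below only when those libraries are importable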
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilevit'] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError ( RuntimeError ):
'''simple docstring'''
pass
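# every shard yields exactly NUM_ITEMS_PER_SHARD examples, so per-rank sizes are predictable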
def gen ( shards ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main ( ):
    rank = int(os.environ['''RANK'''] )
    world_size = int(os.environ['''WORLD_SIZE'''] )
    parser = ArgumentParser()
    parser.add_argument('''--streaming''' , type=bool )
    parser.add_argument('''--local_rank''' , type=int )
    parser.add_argument('''--num_workers''' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 16 | 1 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __A ( TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowercase__ : int = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
with self.assertRaises(_snake_case ):
lowercase__ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(_snake_case ):
lowercase__ : Dict = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('''bool''' ) ,type=Value('''int64''' ) ) )
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any] = pa.array(TypedSequence([1, 2, 3] ,type=Value('''int32''' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowercase__ : List[str] = pa.array(TypedSequence(['''foo''', '''bar'''] ,type=Value('''int64''' ) ) )
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('''int32''' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Dict = pa.array(TypedSequence(['''foo''', '''bar'''] ,try_type=Value('''int64''' ) ) )
self.assertEqual(arr.type ,pa.string() )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase__ : str = pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'''int64''' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'''int64''' ) )
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowercase__ : List[str] = pa.array(TypedSequence(['''foo''', '''bar'''] ,type=ArrayaD((1, 3) ,'''int64''' ) ) )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'''int64''' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'''int64''' ) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = pa.array(TypedSequence(['''foo''', '''bar'''] ,try_type=ArrayaD((1, 3) ,'''int64''' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
import PIL.Image
lowercase__ : List[Any] = PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'''datasets.arrow_writer.cast_to_python_objects''' ,side_effect=_snake_case ) as mock_cast_to_python_objects:
lowercase__ : int = pa.array(TypedSequence([{'''path''': None, '''bytes''': b'''image_bytes'''}, pil_image] ,type=Image() ) )
lowercase__ , lowercase__ : Tuple = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''' ,_snake_case )
self.assertFalse(kwargs['''optimize_list_casting'''] )
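# shared helper: re-open the written Arrow stream and verify both the chunking and the contents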
def _check_output ( output , expected_num_chunks ) -> None:
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table: pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
lowercase__ : List[Any] = pa.BufferOutputStream()
lowercase__ : Dict = pa.schema(__lowerCamelCase ) if fields else None
with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
lowercase__ , lowercase__ : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase__ : Any = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __UpperCAmelCase ( ) -> List[str]:
lowercase__ : List[Any] = pa.BufferOutputStream()
lowercase__ : str = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=__lowerCamelCase , features=__lowerCamelCase ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
lowercase__ , lowercase__ : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
lowercase__ : Optional[int] = pa.BufferReader(output.getvalue() )
lowercase__ : Tuple = pa.ipc.open_stream(__lowerCamelCase )
lowercase__ : pa.Table = f.read_all()
lowercase__ : Dict = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__lowerCamelCase )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
lowercase__ : Optional[int] = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCamelCase , writer_batch_size=__lowerCamelCase , hash_salt='''split_name''' , check_duplicates=__lowerCamelCase , ) as writer:
with pytest.raises(__lowerCamelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2] )
lowercase__ , lowercase__ : Dict = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : str = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCamelCase , writer_batch_size=__lowerCamelCase , hash_salt='''split_name''' , check_duplicates=__lowerCamelCase , ) as writer:
with pytest.raises(__lowerCamelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=10 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=10 )
lowercase__ , lowercase__ : str = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
lowercase__ : str = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCamelCase , writer_batch_size=__lowerCamelCase , hash_salt='''split_name''' , check_duplicates=__lowerCamelCase , ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2 )
lowercase__ , lowercase__ : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
lowercase__ : int = pa.BufferOutputStream()
lowercase__ : Tuple = pa.schema(__lowerCamelCase ) if fields else None
with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
lowercase__ , lowercase__ : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase__ : Tuple = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : Dict = pa.BufferOutputStream()
lowercase__ : Tuple = pa.schema(__lowerCamelCase ) if fields else None
with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
lowercase__ , lowercase__ : Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase__ : Any = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Dict = pa.BufferOutputStream()
lowercase__ : Dict = pa.schema(__lowerCamelCase ) if fields else None
with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
lowercase__ , lowercase__ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase__ : Optional[int] = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __UpperCAmelCase ( ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[int] = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
lowercase__ : Dict = os.path.join(__lowerCamelCase , '''test.arrow''' )
with ArrowWriter(path=__lowerCamelCase , schema=pa.schema(__lowerCamelCase ) ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
lowercase__ , lowercase__ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(__lowerCamelCase , 1 )
def get_base_dtype ( arr_type ):
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list ( lst , value ) -> None:
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : Optional[int] = pa.array(TypedSequence(__lowerCamelCase , optimized_int_type=__lowerCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''' , [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
] , )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
# in range
lowercase__ : Optional[int] = pa.array(OptimizedTypedSequence(__lowerCamelCase , col=__lowerCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
lowercase__ : Tuple = copy.deepcopy(__lowerCamelCase )
lowercase__ : Dict = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__lowerCamelCase , __lowerCamelCase )
lowercase__ : int = pa.array(OptimizedTypedSequence(__lowerCamelCase , col=__lowerCamelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('''raise_exception''' , [False, True] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : List[Any] = str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=__lowerCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]:
lowercase__ : int = '''mock://dataset-train.arrow'''
with ArrowWriter(path=__lowerCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__lowerCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
lowercase__ , lowercase__ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__lowerCamelCase )
def __UpperCAmelCase ( ) -> Optional[int]:
lowercase__ : List[Any] = pa.BufferOutputStream()
with ParquetWriter(stream=__lowerCamelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
lowercase__ , lowercase__ : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
lowercase__ : str = pa.BufferReader(output.getvalue() )
lowercase__ : pa.Table = pq.read_table(__lowerCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''' , [False, True] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
import PIL.Image
lowercase__ : Dict = str(tmp_path / '''test_image_rgb.jpg''' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__lowerCamelCase , format='''png''' )
lowercase__ : Tuple = pa.BufferOutputStream()
with ParquetWriter(
stream=__lowerCamelCase , features=Features({'''image''': Image()} ) , embed_local_files=__lowerCamelCase ) as writer:
writer.write({'''image''': image_path} )
writer.finalize()
lowercase__ : Any = pa.BufferReader(output.getvalue() )
lowercase__ : pa.Table = pq.read_table(__lowerCamelCase )
lowercase__ : Union[str, Any] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''] , __lowerCamelCase )
with open(__lowerCamelCase , '''rb''' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __UpperCAmelCase ( ) -> Union[str, Any]:
lowercase__ : Optional[Any] = pa.schema([pa.field('''col_1''' , pa.string() , nullable=__lowerCamelCase )] )
lowercase__ : List[Any] = pa.BufferOutputStream()
with ArrowWriter(stream=__lowerCamelCase ) as writer:
writer._build_writer(inferred_schema=__lowerCamelCase )
assert writer._schema == pa.schema([pa.field('''col_1''' , pa.string() )] )
| 16 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __A ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "tapas"
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=1_024 ,type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,positive_label_weight=10.0 ,num_aggregation_labels=0 ,aggregation_loss_weight=1.0 ,use_answer_as_supervision=None ,answer_loss_importance=1.0 ,use_normalized_answer_loss=False ,huber_loss_delta=None ,temperature=1.0 ,aggregation_temperature=1.0 ,use_gumbel_for_cells=False ,use_gumbel_for_aggregation=False ,average_approximation_function="ratio" ,cell_selection_preference=None ,answer_loss_cutoff=None ,max_num_rows=64 ,max_num_columns=32 ,average_logits_per_cell=False ,select_one_column=True ,allow_empty_column_selection=False ,init_cell_selection_weights_to_zero=False ,reset_position_index_per_cell=True ,disable_per_token_loss=False ,aggregation_labels=None ,no_aggregation_label_index=None ,**kwargs ,):
"""simple docstring"""
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels ,dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
| 16 | 1 |