| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 - 54.1k chars) | int64 (0 - 699) | string (111 - 35.6k chars) | int64 (0 - 699) | int64 (0 - 1) |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """Map-style dataset that applies a preprocessing function to each item."""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    """Iterable dataset that applies `infer` to each item of `loader`, optionally
    unrolling batches of size `loader_batch_size` into individual items."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating batch unrolling altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the item located at `self._loader_batch_index` within the current batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be a BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors, so they need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch, so we just need to return
            # the current item within that batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # This could be the last batch, so we can't unroll as many elements.
                self.loader_batch_size = observed_batch_size
            # Set the internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    """Iterator that flattens the sub-iterators produced by `infer` into a single stream."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return the next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it is that we're basically flattening
            # lists of lists into a single list, but with generators.
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    """Iterator that accumulates items until one flagged `is_last` is seen,
    then returns the accumulated list."""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # This could be the last batch, so we can't unroll as many elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    """Dataset view that returns `dataset[i][key]` for each item."""

    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    """Dataset view that returns `{"text": dataset[i][key1], "text_pair": dataset[i][key2]}`."""

    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
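if __name__ == "__main__":
    # A minimal usage sketch with illustrative data and callables: wrap a list of
    # dicts, batch it with a DataLoader, then unroll the batch item by item.
    from torch.utils.data import DataLoader

    raw = [{"sentence": "hello"}, {"sentence": "world"}]
    dataset = PipelineDataset(KeyDataset(raw, "sentence"), lambda text, **kwargs: {"text": text}, {})
    loader = DataLoader(dataset, batch_size=2)
    for item in PipelineIterator(loader, lambda batch, **kwargs: batch, {}, loader_batch_size=2):
        print(item)  # {'text': 'hello'} then {'text': 'world'}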
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
A = {'+', '-', '*', '/'}
A = []
for token in postfix_notation:
if token in operations:
A , A = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCamelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
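# A minimal usage sketch, assuming `evaluate_postfix` above is in scope:
# "2 1 + 3 *" encodes (2 + 1) * 3, and "4 13 5 / +" uses truncating division.
assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
assert evaluate_postfix(["4", "13", "5", "/", "+"]) == 6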
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
SCREAMING_SNAKE_CASE_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowercase__ ( ) -> Any:
"""simple docstring"""
UpperCAmelCase = os.path.dirname(os.path.realpath(UpperCamelCase__ ) )
UpperCAmelCase = os.path.join(UpperCamelCase__ , 'words.txt' )
UpperCAmelCase = ''
with open(UpperCamelCase__ ) as f:
UpperCAmelCase = f.readline()
UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
UpperCAmelCase = [
word
for word in [sum(ord(UpperCamelCase__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(UpperCamelCase__ )
if __name__ == "__main__":
print(solution())
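# A worked example of the letter-value rule above (A=1 ... Z=26 via ord(x) - 64):
# "SKY" scores 19 + 11 + 25 = 55, the 10th triangular number, so it counts.
assert sum(ord(x) - 64 for x in "SKY") in TRIANGULAR_NUMBERS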
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase : Any = None
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Optional[int] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCamelCase : str = "▁"
# Segments (not really needed)
UpperCamelCase : str = 0
UpperCamelCase : int = 1
UpperCamelCase : List[Any] = 2
UpperCamelCase : Union[str, Any] = 3
UpperCamelCase : Optional[Any] = 4
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = """left"""
lowerCAmelCase = XLNetTokenizer
def __init__( self : Tuple , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : int=False , _lowercase : Tuple=True , _lowercase : Union[str, Any]=False , _lowercase : int="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<unk>" , _lowercase : Optional[int]="<sep>" , _lowercase : int="<pad>" , _lowercase : Dict="<cls>" , _lowercase : str="<mask>" , _lowercase : List[str]=["<eop>", "<eod>"] , **_lowercase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
A = 3
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
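if __name__ == "__main__":
    # A minimal usage sketch through the public API; downloads the checkpoint
    # named in the map above.
    from transformers import XLNetTokenizerFast

    tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    print(tokenizer("Hello world")["input_ids"])  # note: pads on the left, per `padding_side`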
"""Fetch and summarize book data from https://openlibrary.org by olid or ISBN."""
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an olid such as 'isbn/0140328726', return the book data from Open Library."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a more human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : Any = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
A = 1
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
A = init[0]
A = init[1]
A = 0
A = g + heuristic[x][y] # cost from starting cell to destination cell
A = [[f, g, x, y]]
A = False # flag that is set when search is complete
A = False # flag set if we can't find expand
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
A = cell.pop()
A = next_cell[2]
A = next_cell[3]
A = next_cell[1]
if x == goal[0] and y == goal[1]:
A = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
A = x + DIRECTIONS[i][0]
A = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A = g + cost
A = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A = 1
A = i
A = []
A = goal[0]
A = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A = x - DIRECTIONS[action[x][y]][0]
A = y - DIRECTIONS[action[x][y]][1]
A = xa
A = ya
invpath.append([x, y] )
A = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCamelCase : Any = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase : List[Any] = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase : Tuple = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase : Union[str, Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase : List[str] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase : Dict = 99
UpperCamelCase , UpperCamelCase : Optional[Any] = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {"vocab_file": "sentencepiece.model"}
UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCamelCase : Union[str, Any] = {
"google/rembert": 256,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def __a ( self : Tuple ):
return len(self.sp_model )
def __a ( self : List[str] ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : List[str] , _lowercase : int ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ):
A = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def __a ( self : Dict , _lowercase : Tuple ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : str , _lowercase : Optional[int] ):
return self.sp_model.IdToPiece(_lowercase )
def __a ( self : Optional[int] , _lowercase : Optional[int] ):
A = self.sp_model.decode_pieces(_lowercase )
return out_string
def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
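# A worked example of the segment layout produced by
# `create_token_type_ids_from_sequences` above, using illustrative lengths:
_len_a, _len_b = 3, 2
_token_type_ids = (1 + _len_a + 1) * [0] + (_len_b + 1) * [1]  # [CLS] A [SEP] | B [SEP]
assert _token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]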
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_A = logging.get_logger(__name__)
# General docstring
_A = "RegNetConfig"
# Base docstring
_A = "facebook/regnet-y-040"
_A = [1, 1_088, 7, 7]
# Image classification docstring
_A = "facebook/regnet-y-040"
_A = "tabby, tabby cat"
_A = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , A_ : int , A_ : int = 3 , A_ : int = 1 , A_ : int = 1 , A_ : Optional[str] = "relu" , **A_ : Optional[Any] , )-> Tuple:
super().__init__(**_lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__UpperCamelCase = tf.keras.layers.ConvaD(
filters=_lowercase , kernel_size=_lowercase , strides=_lowercase , padding="VALID" , groups=_lowercase , use_bias=_lowercase , name="convolution" , )
__UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
__UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def A ( self : List[str] , A_ : int )-> List[str]:
__UpperCamelCase = self.convolution(self.padding(_lowercase ) )
__UpperCamelCase = self.normalization(_lowercase )
__UpperCamelCase = self.activation(_lowercase )
return hidden_state
class __UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[Any] , A_ : RegNetConfig , **A_ : Optional[Any] )-> Optional[int]:
super().__init__(**_lowercase )
__UpperCamelCase = config.num_channels
__UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def A ( self : int , A_ : int )-> Any:
__UpperCamelCase = shape_list(_lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCamelCase = tf.transpose(_lowercase , perm=(0, 2, 3, 1) )
__UpperCamelCase = self.embedder(_lowercase )
return hidden_state
class __UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , A_ : int , A_ : int = 2 , **A_ : Optional[int] )-> Optional[int]:
super().__init__(**_lowercase )
__UpperCamelCase = tf.keras.layers.ConvaD(
filters=_lowercase , kernel_size=1 , strides=_lowercase , use_bias=_lowercase , name="convolution" )
__UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def A ( self : Optional[int] , A_ : tf.Tensor , A_ : bool = False )-> Tuple:
return self.normalization(self.convolution(_lowercase ) , training=_lowercase )
class __UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , A_ : int , A_ : int , **A_ : Dict )-> Tuple:
super().__init__(**_lowercase )
__UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowercase , name="pooler" )
__UpperCamelCase = [
tf.keras.layers.ConvaD(filters=_lowercase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=_lowercase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def A ( self : Dict , A_ : str )-> List[Any]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCamelCase = self.pooler(_lowercase )
for layer_module in self.attention:
__UpperCamelCase = layer_module(_lowercase )
__UpperCamelCase = hidden_state * pooled
return hidden_state
class __UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[str] , A_ : RegNetConfig , A_ : int , A_ : int , A_ : int = 1 , **A_ : Dict )-> Dict:
super().__init__(**_lowercase )
__UpperCamelCase = in_channels != out_channels or stride != 1
__UpperCamelCase = max(1 , out_channels // config.groups_width )
__UpperCamelCase = (
TFRegNetShortCut(_lowercase , stride=_lowercase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCamelCase = [
TFRegNetConvLayer(_lowercase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_lowercase , stride=_lowercase , groups=_lowercase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(_lowercase , kernel_size=1 , activation=_lowercase , name="layer.2" ),
]
__UpperCamelCase = ACTaFN[config.hidden_act]
def A ( self : List[str] , A_ : Optional[int] )-> List[Any]:
__UpperCamelCase = hidden_state
for layer_module in self.layers:
__UpperCamelCase = layer_module(_lowercase )
__UpperCamelCase = self.shortcut(_lowercase )
hidden_state += residual
__UpperCamelCase = self.activation(_lowercase )
return hidden_state
class __UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , A_ : RegNetConfig , A_ : int , A_ : int , A_ : int = 1 , **A_ : Optional[int] )-> Dict:
super().__init__(**_lowercase )
__UpperCamelCase = in_channels != out_channels or stride != 1
__UpperCamelCase = max(1 , out_channels // config.groups_width )
__UpperCamelCase = (
TFRegNetShortCut(_lowercase , stride=_lowercase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
__UpperCamelCase = [
TFRegNetConvLayer(_lowercase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_lowercase , stride=_lowercase , groups=_lowercase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(_lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(_lowercase , kernel_size=1 , activation=_lowercase , name="layer.3" ),
]
__UpperCamelCase = ACTaFN[config.hidden_act]
def A ( self : List[Any] , A_ : Optional[int] )-> Optional[int]:
__UpperCamelCase = hidden_state
for layer_module in self.layers:
__UpperCamelCase = layer_module(_lowercase )
__UpperCamelCase = self.shortcut(_lowercase )
hidden_state += residual
__UpperCamelCase = self.activation(_lowercase )
return hidden_state
class __UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , A_ : RegNetConfig , A_ : int , A_ : int , A_ : int = 2 , A_ : int = 2 , **A_ : List[str] )-> str:
super().__init__(**_lowercase )
__UpperCamelCase = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(_lowercase , _lowercase , _lowercase , stride=_lowercase , name="layers.0" ),
*[layer(_lowercase , _lowercase , _lowercase , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def A ( self : Optional[int] , A_ : Optional[Any] )-> Optional[Any]:
for layer_module in self.layers:
__UpperCamelCase = layer_module(_lowercase )
return hidden_state
class __UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , A_ : RegNetConfig , **A_ : Union[str, Any] )-> str:
super().__init__(**_lowercase )
__UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
__UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_lowercase , _lowercase , _lowercase , depth=_lowercase , name=f"""stages.{i+1}""" ) )
def A ( self : str , A_ : tf.Tensor , A_ : bool = False , A_ : bool = True )-> Tuple:
__UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCamelCase = hidden_states + (hidden_state,)
__UpperCamelCase = stage_module(_lowercase )
if output_hidden_states:
__UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_lowercase , hidden_states=_lowercase )
@keras_serializable
class __UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
_snake_case : Dict = RegNetConfig
def __init__( self : str , A_ : str , **A_ : Dict )-> Any:
super().__init__(**_lowercase )
__UpperCamelCase = config
__UpperCamelCase = TFRegNetEmbeddings(_lowercase , name="embedder" )
__UpperCamelCase = TFRegNetEncoder(_lowercase , name="encoder" )
__UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowercase , name="pooler" )
@unpack_inputs
def A ( self : Union[str, Any] , A_ : tf.Tensor , A_ : Optional[bool] = None , A_ : Optional[bool] = None , A_ : bool = False , )-> Union[str, Any]:
__UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase = self.embedder(_lowercase , training=_lowercase )
__UpperCamelCase = self.encoder(
_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase , training=_lowercase )
__UpperCamelCase = encoder_outputs[0]
__UpperCamelCase = self.pooler(_lowercase )
# Change to NCHW output format have uniformity in the modules
__UpperCamelCase = tf.transpose(_lowercase , perm=(0, 3, 1, 2) )
__UpperCamelCase = tf.transpose(_lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCamelCase = tuple([tf.transpose(_lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowercase , pooler_output=_lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __UpperCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
_snake_case : str = RegNetConfig
_snake_case : Optional[int] = 'regnet'
_snake_case : List[str] = 'pixel_values'
@property
def A ( self : List[Any] )-> int:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
_A = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_A = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase_ , )
class __UpperCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Tuple , A_ : RegNetConfig , *A_ : int , **A_ : Dict )-> List[Any]:
super().__init__(_lowercase , *_lowercase , **_lowercase )
__UpperCamelCase = TFRegNetMainLayer(_lowercase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : int , A_ : tf.Tensor , A_ : Optional[bool] = None , A_ : Optional[bool] = None , A_ : Optional[int]=False , )-> Optional[int]:
__UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase = self.regnet(
pixel_values=_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase , training=_lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase_ , )
class __UpperCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , A_ : RegNetConfig , *A_ : Dict , **A_ : Optional[int] )-> Dict:
super().__init__(_lowercase , *_lowercase , **_lowercase )
__UpperCamelCase = config.num_labels
__UpperCamelCase = TFRegNetMainLayer(_lowercase , name="regnet" )
# classification head
__UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Optional[int] , A_ : tf.Tensor = None , A_ : tf.Tensor = None , A_ : bool = None , A_ : bool = None , A_ : Union[str, Any]=False , )-> Optional[int]:
__UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase = self.regnet(
_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase , training=_lowercase )
__UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase = self.classifier[0](_lowercase )
__UpperCamelCase = self.classifier[1](_lowercase )
__UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_lowercase , logits=_lowercase )
if not return_dict:
__UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_lowercase , logits=_lowercase , hidden_states=outputs.hidden_states ) | 505 |
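if __name__ == "__main__":
    # A minimal smoke-test sketch through the public API, using a fresh,
    # randomly initialized config instead of pretrained weights.
    from transformers import RegNetConfig, TFRegNetModel

    model = TFRegNetModel(RegNetConfig())
    outputs = model(tf.random.uniform((1, 3, 224, 224)))  # NCHW pixel values
    print(outputs.pooler_output.shape)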
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCamelCase : Any = {"mobilebert-uncased": 512}
UpperCamelCase : Any = {}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Optional[int]=True , _lowercase : int="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Any="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : Any=None , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
A = getattr(_lowercase , normalizer_state.pop('type' ) )
A = do_lower_case
A = strip_accents
A = tokenize_chinese_chars
A = normalizer_class(**_lowercase )
A = do_lower_case
def __a ( self : List[Any] , _lowercase : Tuple , _lowercase : Any=None ):
A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
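if __name__ == "__main__":
    # A minimal usage sketch through the public API; downloads the checkpoint
    # referenced in the map above.
    from transformers import MobileBertTokenizerFast

    tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    encoded = tokenizer("Hello", "world")  # sentence pair -> [CLS] A [SEP] B [SEP]
    print(encoded["input_ids"], encoded["token_type_ids"])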
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = 0
@slow
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__a = AutoTokenizer.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__a = AutoTokenizer.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_lowercase ) , 0 )
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
__a = AutoTokenizer.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
__a = AutoTokenizer.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
__a = AutoConfig.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
# Check that tokenizer_type ≠ model_type
__a = AutoTokenizer.from_pretrained(_lowercase , config=_lowercase )
self.assertIsInstance(_lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __UpperCamelCase ( self ) ->int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowercase , 'vocab.txt' ) )
__a = AutoTokenizer.from_pretrained(_lowercase , tokenizer_type='bert' , use_fast=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowercase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowercase , 'merges.txt' ) )
__a = AutoTokenizer.from_pretrained(_lowercase , tokenizer_type='gpt2' , use_fast=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@require_tokenizers
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowercase , 'vocab.txt' ) )
__a = AutoTokenizer.from_pretrained(_lowercase , tokenizer_type='bert' )
self.assertIsInstance(_lowercase , _lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowercase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowercase , 'merges.txt' ) )
__a = AutoTokenizer.from_pretrained(_lowercase , tokenizer_type='gpt2' )
self.assertIsInstance(_lowercase , _lowercase )
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
with pytest.raises(_lowercase ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__a = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(_lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(_lowercase , _lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , _lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_lowercase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__a = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__a = TOKENIZER_MAPPING.values()
__a = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_lowercase )
@require_tokenizers
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_lowercase ) , _lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , _lowercase )
@require_tokenizers
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_lowercase )
__a = 'Hello, world. How are you?'
__a = tokenizer.tokenize(_lowercase )
self.assertEqual('[UNK]' , tokens[0] )
__a = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_lowercase )
__a = tokenizer.tokenize(_lowercase )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
__a = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(_lowercase ) , _lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
__a = AutoTokenizer.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowercase )
__a = AutoTokenizer.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
__a = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_lowercase , _lowercase )
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
# Check we can load the tokenizer config of an online model.
__a = get_tokenizer_config('bert-base-cased' )
__a = config.pop('_commit_hash' , _lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_lowercase , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__a = get_tokenizer_config(_lowercase )
self.assertDictEqual(_lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__a = AutoTokenizer.from_pretrained(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowercase )
__a = get_tokenizer_config(_lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowercase )
AutoTokenizer.register(_lowercase , slow_tokenizer_class=_lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoTokenizer.register(_lowercase , slow_tokenizer_class=_lowercase )
__a = CustomTokenizer.from_pretrained(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowercase )
__a = AutoTokenizer.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowercase )
# Can register in two steps
AutoTokenizer.register(_lowercase , slow_tokenizer_class=_lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_lowercase , fast_tokenizer_class=_lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_lowercase , slow_tokenizer_class=_lowercase , fast_tokenizer_class=_lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoTokenizer.register(_lowercase , fast_tokenizer_class=_lowercase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__a = BertTokenizerFast.from_pretrained(_lowercase )
bert_tokenizer.save_pretrained(_lowercase )
__a = CustomTokenizerFast.from_pretrained(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowercase )
__a = AutoTokenizer.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
__a = AutoTokenizer.from_pretrained(_lowercase , use_fast=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowercase ):
__a = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
__a = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowercase )
__a = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowercase )
__a = AutoTokenizer.from_pretrained(_lowercase , trust_remote_code=_lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__a = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowercase , use_fast=_lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowercase )
__a = AutoTokenizer.from_pretrained(_lowercase , trust_remote_code=_lowercase , use_fast=_lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ):
__a =False
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ):
__a =NewTokenizer
__a =False
try:
AutoConfig.register('custom' , _lowercase )
AutoTokenizer.register(_lowercase , slow_tokenizer_class=_lowercase )
AutoTokenizer.register(_lowercase , fast_tokenizer_class=_lowercase )
# If remote code is not set, the default is to use local
__a = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
__a = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__a = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
__a = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowercase , use_fast=_lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__a = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
__a = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowercase , use_fast=_lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
__a = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__a = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowercase , use_fast=_lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , 'bert-base is not a local folder and is not a valid model identifier' ):
__a = AutoTokenizer.from_pretrained('bert-base' )
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__a = AutoTokenizer.from_pretrained(_lowercase , revision='aaaaaa' )
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
# Make sure we have cached the tokenizer.
__a = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
__a = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
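        # (added note, not in the original test) The expected pattern for a
        # cached repo: a second `from_pretrained` issues exactly one HEAD
        # request to validate the cached revision, and no GET requests.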
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
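    # Illustrative sanity checks (an addition, not part of the original file);
    # "aa" occurs in "baabaa" at indices 1 and 4.
    assert z_function("aabaab") == [0, 1, 0, 3, 1, 0]
    assert find_pattern("aa", "baabaa") == 2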
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
_lowerCAmelCase = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class _UpperCAmelCase ( UpperCAmelCase_ ):
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_INIT_CONFIGURATION
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = RealmTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowercase ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_lowercase , normalizer_state.pop("""type""" ) )
A_ : Union[str, Any] = do_lower_case
A_ : str = strip_accents
A_ : Dict = tokenize_chinese_chars
A_ : List[str] = normalizer_class(**_lowercase )
A_ : List[str] = do_lower_case
def _lowerCamelCase ( self , a__ , **a__ ):
A_ : List[Any] = PaddingStrategy.MAX_LENGTH
A_ : Optional[int] = text
A_ : Union[str, Any] = kwargs.pop("""text_pair""" , _lowercase )
A_ : Tuple = kwargs.pop("""return_tensors""" , _lowercase )
A_ : Optional[int] = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(_lowercase ):
if batch_text_pair is not None:
A_ : List[str] = batch_text_pair[idx]
else:
A_ : Optional[int] = None
A_ : Optional[Any] = super().__call__(_lowercase , _lowercase , return_tensors=_lowercase , **_lowercase )
A_ : Dict = encoded_candidates.get("""input_ids""" )
A_ : Tuple = encoded_candidates.get("""attention_mask""" )
A_ : str = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(_lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_lowercase )
A_ : Optional[Any] = {key: item for key, item in output_data.items() if len(_lowercase ) != 0}
return BatchEncoding(_lowercase , tensor_type=_lowercase )
def _lowerCamelCase ( self , a__ , a__=None ):
A_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self , a__ , a__ = None ):
A_ : Optional[int] = [self.sep_token_id]
A_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self , a__ , a__ = None ):
A_ : Optional[Any] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
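# Illustrative usage sketch (a hedged addition; the concrete class name
# `RealmTokenizerFast` is assumed from the slow `RealmTokenizer` imported
# above): the candidate-batching method pads every candidate to `max_length`
# and stacks the encodings, so 2 questions with 2 candidates each at
# max_length=10 yield input_ids of shape (2, 2, 10) when return_tensors="np".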
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
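# (added note) The integration tests above compare either a 3x3 corner slice
# or a full reference image from the Hub against freshly generated samples,
# with a small max-absolute-difference budget to absorb numerical
# nondeterminism across hardware.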
def solution(length: int = 50) -> int:
    """Count the tilings of a 1 x `length` row using black unit tiles and
    coloured tiles of length two, three or four."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
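    # Small hand-checkable case (illustrative addition): a length-3 row admits
    # exactly 4 tilings (three unit tiles, a 2-tile on the left or on the
    # right, or a single 3-tile).
    assert solution(3) == 4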
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
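        # (explanatory comment added for clarity; not in the original test)
        # `replicate` copies the pipeline params onto every local device,
        # `jax.random.split` hands each device its own PRNG key, and `shard`
        # folds the batch's leading axis into (num_devices, batch_per_device),
        # the layout the jitted, pmapped pipeline call below expects.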
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args, **kwargs):
    """Forward to `AutoConfig.from_pretrained` (full docstring prepended by the decorator)."""
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args, **kwargs):
    """Forward to `AutoTokenizer.from_pretrained` (full docstring prepended by the decorator)."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__ )
def model(*args, **kwargs):
    """Forward to `AutoModel.from_pretrained` (full docstring prepended by the decorator)."""
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args, **kwargs):
    """Forward to `AutoModelForCausalLM.from_pretrained` (full docstring prepended by the decorator)."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args, **kwargs):
    """Forward to `AutoModelForMaskedLM.from_pretrained` (full docstring prepended by the decorator)."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args, **kwargs):
    """Forward to `AutoModelForSequenceClassification.from_pretrained` (full docstring prepended by the decorator)."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args, **kwargs):
    """Forward to `AutoModelForQuestionAnswering.from_pretrained` (full docstring prepended by the decorator)."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
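# (added note) This mirrors a torch.hub-style entry-point module: `dependencies`
# lists the packages the hub loader should verify, and each helper forwards to
# the matching Auto class's `from_pretrained`, with `add_start_docstrings`
# prepending that class's full docstring onto the helper.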
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """An undirected weighted graph."""
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge, storing the vertex pair in sorted order."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> "Graph":
        """Return a minimum spanning tree of the graph (Prim's algorithm)."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT = (0, 0)
        min_weight: int = 0
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
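# (added note) `prims_algorithm` grows the tree from the smallest vertex,
# always adding the cheapest edge that crosses the cut between the tree built
# so far and the remaining vertices: the classic Prim invariant.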
def solution(filename: str = "p107_network.txt") -> int:
    """Load the network's adjacency matrix and return the total weight saved
    by replacing the full network with its minimum spanning tree."""
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    # read the lower triangle of the matrix; "-" marks a missing edge
    for edge_a in range(1, len(adjacency_matrix)):
        for edge_b in range(edge_a):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
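# (added note) When `target_sizes` is given, the logits are bilinearly
# upsampled to each requested size before the per-pixel argmax, so the
# returned segmentation maps align pixel-for-pixel with the original images.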
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False) -> None:
    """Export a single pipeline sub-model to ONNX at `output_path`."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
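# (added note) Every export below routes through `onnx_export`; the
# `dynamic_axes` mappings mark batch and spatial dimensions as symbolic so the
# resulting ONNX graphs accept variable batch sizes and resolutions.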
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fpaa: bool = False) -> None:
    """Convert every sub-model of a Stable Diffusion checkpoint to ONNX.
    (Parameter names reconstructed from the call sites in this script; `fpaa`
    mirrors the `args.fpaa` flag passed at the bottom of the file.)"""
lowercase__ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase__ = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
lowercase__ = """cpu"""
lowercase__ = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=UpperCamelCase__ ).to(UpperCamelCase__ )
lowercase__ = Path(UpperCamelCase__ )
# TEXT ENCODER
lowercase__ = pipeline.text_encoder.config.max_position_embeddings
lowercase__ = pipeline.text_encoder.config.hidden_size
lowercase__ = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
lowercase__ = pipeline.unet.config.in_channels
lowercase__ = pipeline.unet.config.sample_size
lowercase__ = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
lowercase__ = str(unet_path.absolute().as_posix() )
lowercase__ = os.path.dirname(UpperCamelCase__ )
lowercase__ = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location="""weights.pb""" , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
# VAE ENCODER
lowercase__ = pipeline.vae
lowercase__ = vae_encoder.config.in_channels
lowercase__ = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowercase__ = lambda __magic_name__ , __magic_name__ : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCamelCase__ , )
# VAE DECODER
lowercase__ = pipeline.vae
lowercase__ = vae_decoder.config.latent_channels
lowercase__ = vae_decoder.config.out_channels
# forward only through the decoder part
lowercase__ = vae_encoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowercase__ = pipeline.safety_checker
lowercase__ = safety_checker.config.vision_config.num_channels
lowercase__ = safety_checker.config.vision_config.image_size
lowercase__ = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
lowercase__ = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
lowercase__ = pipeline.feature_extractor
else:
lowercase__ = None
lowercase__ = None
lowercase__ = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print("""ONNX pipeline saved to""" , UpperCamelCase__ )
del pipeline
del onnx_pipeline
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
A : str = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon India search results for `product` into a DataFrame."""
    url = f'https://www.amazon.in/laptop/s?k={product}'
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
                product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
                product_rating = 'Not available'
try:
                product_mrp = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
                product_mrp = ''
try:
                discount = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
                discount = float('nan' )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = ' '
A = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
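    # (added note) Discount is derived as (MRP - price) / MRP * 100; listings
    # missing the expected price markup are skipped by the outer
    # AttributeError handler in the scraping loop.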
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A_ : List[str] ={
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] =["ConvNextFeatureExtractor"]
A_ : str =["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] =[
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple =[
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    A_ : Tuple =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
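# (added note) `_LazyModule` defers the heavy torch/TensorFlow imports until an
# attribute is first accessed, while the TYPE_CHECKING branch keeps static
# analyzers aware of the concrete symbols.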
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester ( unittest.TestCase ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : int=7 , _lowercase : List[str]=3 , _lowercase : Tuple=18 , _lowercase : Dict=30 , _lowercase : Any=400 , _lowercase : int=True , _lowercase : List[Any]=None , _lowercase : Tuple=True , _lowercase : List[Any]=False , _lowercase : str=True , _lowercase : List[str]=True , _lowercase : int=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , ):
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size if size is not None else {'height': 18, 'width': 20}
A = do_thumbnail
A = do_align_axis
A = do_pad
A = do_normalize
A = image_mean
A = image_std
def __a ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
A = DonutImageProcessingTester(self )
@property
def __a ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowercase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowercase , 'do_pad' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
def __a ( self : int ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
A = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __a ( self : Any ):
pass
@is_flaky()
def __a ( self : int ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
    def __a(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """
    A bunch of sanity checks on the parsed arguments before training starts.
    """
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
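# Example invocation (illustrative only; the file paths are placeholders and the
# flag values follow the distillation README rather than anything defined here):
#
#   python train.py \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --dump_path serialization_dir/my_first_training --force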
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
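# Usage note (illustrative): `RegressionModel` recovers `a` and `b` by plain SGD
# on `RegressionDataset`, and the print in `forward` fires only on the first
# batch, which makes dtype mismatches under mixed precision easy to spot.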
def get_dataloaders(accelerator, batch_size=16):
    """
    Build MRPC train/validation dataloaders from the small csv test samples.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
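# Minimal usage sketch (illustrative; requires the MRPC csv samples referenced above):
if __name__ == "__main__":
    from accelerate import Accelerator

    accelerator = Accelerator()
    train_dataloader, eval_dataloader = get_dataloaders(accelerator)
    print(f"train batches: {len(train_dataloader)}, eval batches: {len(eval_dataloader)}")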
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Return True if it is safe to place a queen at (row, column) given the
    current state of the board.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Recursively place queens row by row, printing every complete placement.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """
    Print the board with 'Q' for a queen and '.' for an empty cell.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """
    Return the longest non-decreasing subsequence of `array`.
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
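# Example (illustrative): for [10, 22, 9, 33, 21, 50, 41, 60, 80] the function
# returns the longest non-decreasing subsequence [10, 22, 33, 41, 60, 80].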
if __name__ == "__main__":
import doctest
doctest.testmod()
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _UpperCamelCase( unittest.TestCase ):
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors, using numpy.
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors, without numpy.
    """
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
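# Quick sanity check (illustrative): both implementations agree on a 3-4-5 triangle.
assert euclidean_distance((0, 0), (3, 4)) == 5.0
assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0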
if __name__ == "__main__":
    def benchmark() -> None:
        """Benchmark both implementations with timeit."""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """
    Construct a BertGeneration tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take a string as input and return a list of strings (sub-word tokens)."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
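# Minimal usage sketch (illustrative; downloads the sentencepiece model listed above):
# tokenizer = BertGenerationTokenizer.from_pretrained(
#     "google/bert_for_seq_generation_L-24_bbc_encoder"
# )
# tokenizer.tokenize("Hello world")  # -> sentencepiece sub-tokens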
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names,
                dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names,
               output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True,
               use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names,
               output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Convert a Stable Diffusion checkpoint to ONNX, exporting each sub-model separately."""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
# TEXT ENCODER
A = pipeline.text_encoder.config.max_position_embeddings
A = pipeline.text_encoder.config.hidden_size
A = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
A = pipeline.unet.config.in_channels
A = pipeline.unet.config.sample_size
A = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
A = str(unet_path.absolute().as_posix() )
A = os.path.dirname(UpperCamelCase__ )
A = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location='weights.pb' , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
# VAE ENCODER
A = pipeline.vae
A = vae_encoder.config.in_channels
A = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
A = lambda UpperCamelCase__ , UpperCamelCase__ : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
# VAE DECODER
A = pipeline.vae
A = vae_decoder.config.latent_channels
A = vae_decoder.config.out_channels
# forward only through the decoder part
A = vae_encoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
A = pipeline.safety_checker
A = safety_checker.config.vision_config.num_channels
A = safety_checker.config.vision_config.image_size
A = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
A = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
A = pipeline.feature_extractor
else:
A = None
A = None
A = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print('ONNX pipeline saved to' , UpperCamelCase__ )
del pipeline
del onnx_pipeline
A = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
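# Example invocation (illustrative; the script name and model id are assumptions):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./stable_diffusion_onnx \
#       --opset 14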
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
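# Worked example (illustrative): for a 480x640 input with output_size=384 and
# multiple=32, keep_aspect_ratio=False scales each side independently and yields
# (384, 384); with keep_aspect_ratio=True the height scale (0.8) is reused for
# the width, yielding (384, 512).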
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ):
__a =["pixel_values"]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = False , lowerCamelCase = 1 , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
__a = size if size is not None else {'height': 384, 'width': 384}
__a = get_size_dict(_lowercase )
__a = do_resize
__a = size
__a = keep_aspect_ratio
__a = ensure_multiple_of
__a = resample
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = 1 , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ) ->Optional[Any]:
'''simple docstring'''
__a = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""" )
__a = get_resize_output_image_size(
_lowercase , output_size=(size['height'], size['width']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ) ->List[Any]:
'''simple docstring'''
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ) ->List[str]:
'''simple docstring'''
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) ->Any:
'''simple docstring'''
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(_lowercase )
__a = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__a = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__a = resample if resample is not None else self.resample
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__a = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
__a = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
__a = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
__a = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
__a = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
__a = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = None ) ->int:
'''simple docstring'''
__a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
__a = target_sizes.numpy()
__a = []
for idx in range(len(_lowercase ) ):
__a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
__a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
__a = logits.argmax(dim=1 )
__a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase : List[str] = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """
    The function run by each process: repeatedly compare-exchange with the
    left/right neighbour through pipes, then report the final value to main.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """
    Sort `arr` with one process per element, communicating through pipes.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """
    Sort a reversed list of 10 elements and print it before and after.
    """
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
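# Expected behaviour (illustrative): main() prints the reversed list
# 10 9 8 ... 1 followed by the sorted list 1 2 3 ... 10.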
if __name__ == "__main__":
main()
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
A_ : int = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=_lowercase ).to(_lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained("""google/mt5-small""" )
A_ : Any = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
A_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
A_ : Dict = model(input_ids.to(_lowercase ) , labels=labels.to(_lowercase ) ).loss
A_ : Union[str, Any] = -(labels.shape[-1] * loss.item())
A_ : List[str] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
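        # Note (illustrative): multiplying the mean cross-entropy loss by the
        # label length converts it into a summed negative log-likelihood,
        # matching the score convention of the original Mesh TensorFlow T5 code.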
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polymonial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class _a :
"""simple docstring"""
A_ = MBartConfig
A_ = {}
A_ = """gelu"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Union[str, Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = bos_token_id
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase_ = prepare_mbart_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = TFMBartModel(config=_lowercase ).get_decoder()
UpperCamelCase_ = inputs_dict['input_ids']
UpperCamelCase_ = input_ids[:1, :]
UpperCamelCase_ = inputs_dict['attention_mask'][:1, :]
UpperCamelCase_ = inputs_dict['head_mask']
UpperCamelCase_ = 1
# first forward pass
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , head_mask=_lowercase , use_cache=_lowercase )
UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple()
UpperCamelCase_ = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
A_ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
A_ = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ = True
A_ = False
A_ = False
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = TFMBartModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_lowercase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowercase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
A_ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
A_ = """facebook/mbart-large-en-ro"""
@cached_property
def _UpperCAmelCase ( self ) -> int:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = self.translate_src_text(**_lowercase )
self.assertListEqual(self.expected_text , _lowercase )
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> str:
UpperCamelCase_ = self.tokenizer(self.src_text , **_lowercase , return_tensors='tf' )
UpperCamelCase_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
UpperCamelCase_ = self.tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
return generated_words
@slow
def _UpperCAmelCase ( self ) -> int:
self._assert_generated_batch_equal_expected()
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
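# --- Illustrative usage (added sketch) ---
# The shims above only swap in the MT5 config, so loading mirrors T5; the
# checkpoint name is an example, not taken from this file.
if __name__ == "__main__":
    model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
    print(model.config.model_type)  # "mt5"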
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
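# --- Illustrative usage (added sketch) ---
# Instantiating the config directly; no model weights are involved.
if __name__ == "__main__":
    config = MraConfig(num_hidden_layers=2, block_per_row=4)
    print(config.model_type, config.hidden_size)  # mra 768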
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
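# --- Illustrative usage (added sketch) ---
# Build a small config and inspect the ONNX input spec; no weights involved.
if __name__ == "__main__":
    onnx_config = ConvBertOnnxConfig(ConvBertConfig(num_hidden_layers=2))
    print(list(onnx_config.inputs.keys()))  # ['input_ids', 'attention_mask', 'token_type_ids']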
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    # torch.compile() wrappers only exist on PyTorch >= 2.0.
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    # Unwrap DDP/DataParallel/DeepSpeed/compile containers to reach the raw model.
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    # Blocks until every process in the distributed job reaches this point.
    PartialState().wait_for_everyone()


def save(obj, f):
    # Save on the main process only (xm.save handles TPU synchronization itself).
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    # Temporarily set (upper-cased) environment variables for the duration of the block.
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj) -> str:
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    # Recursively merge `source` into `destination` (nested dicts are merged, not replaced).
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
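# --- Illustrative usage (added sketch) ---
# `patch_environment` scopes (upper-cased) env vars to a block, assuming
# MASTER_PORT is not already set; `merge_dicts` merges nested dicts.
if __name__ == "__main__":
    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"
    print(merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}))  # {'a': {'c': 2, 'b': 1}}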
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
def binomial_coefficient(n, r):
    """Compute nCr with a one-dimensional Pascal's-triangle DP table."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
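# Quick sanity check against the standard library (added, not in the
# original snippet):
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252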
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an expression given in reverse Polish (postfix) notation."""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division that truncates toward zero.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
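# Added example (not in the original file): "2 1 + 3 *" is (2 + 1) * 3 = 9.
if __name__ == "__main__":
    assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
    assert evaluate_postfix([]) == 0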
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix used to give XLNet and Transformer-XL more state on short prompts.
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *lowercase_ , **lowercase_ ) -> List[Any]:
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase = None
if self.model.config.prefix is not None:
UpperCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
UpperCAmelCase = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase = {**self._forward_params, **forward_params}
def a_ ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
UpperCAmelCase = {}
if prefix is not None:
UpperCAmelCase = prefix
if prefix:
UpperCAmelCase = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
UpperCAmelCase = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
UpperCAmelCase = handle_long_generation
preprocess_params.update(_lowercase )
UpperCAmelCase = generate_kwargs
UpperCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
UpperCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
UpperCAmelCase = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a_ ( self , *lowercase_ , **lowercase_ ) -> Any:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self , lowercase_ , **lowercase_ ) -> int:
return super().__call__(_lowercase , **_lowercase )
def a_ ( self , lowercase_ , lowercase_="" , lowercase_=None , **lowercase_ ) -> int:
UpperCAmelCase = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
UpperCAmelCase = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase = generate_kwargs['max_new_tokens']
else:
UpperCAmelCase = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
UpperCAmelCase = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase = inputs['attention_mask'][:, -keep_length:]
return inputs
def a_ ( self , lowercase_ , **lowercase_ ) -> str:
UpperCAmelCase = model_inputs['input_ids']
UpperCAmelCase = model_inputs.get('attention_mask' , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = 1
else:
UpperCAmelCase = input_ids.shape[0]
UpperCAmelCase = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
UpperCAmelCase = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
UpperCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , lowercase_ , lowercase_=ReturnType.FULL_TEXT , lowercase_=True ) -> Optional[int]:
UpperCAmelCase = model_outputs['generated_sequence'][0]
UpperCAmelCase = model_outputs['input_ids']
UpperCAmelCase = model_outputs['prompt_text']
UpperCAmelCase = generated_sequence.numpy().tolist()
UpperCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase = 0
else:
UpperCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase = prompt_text + text[prompt_length:]
else:
UpperCAmelCase = text[prompt_length:]
UpperCAmelCase = {'generated_text': all_text}
records.append(_lowercase )
return records
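# --- Illustrative usage (added sketch) ---
# The pipeline above is normally constructed through the `pipeline` factory;
# the checkpoint name is an example.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])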
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters generated by the recurrence below, up to ``max_perimeter``."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    """A*-style grid search; returns the path and the action map."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4096,
"allenai/longformer-large-4096": 4096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings for byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]="replace" , SCREAMING_SNAKE_CASE__ : str="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : str="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : List[str]="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
__a : List[str] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
__a : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
__a : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
__a : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
__a : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
__a : List[str] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__a : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding='utf-8' ) as vocab_handle:
__a : Any = json.load(_lowercase )
__a : Tuple = {v: k for k, v in self.encoder.items()}
__a : int = errors # how to handle errors in decoding
__a : Optional[int] = bytes_to_unicode()
__a : Any = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding='utf-8' ) as merges_handle:
__a : int = merges_handle.read().split('\n' )[1:-1]
__a : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
__a : Dict = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
__a : Tuple = {}
__a : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__a : List[str] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return len(self.encoder )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__a : Dict = tuple(_lowercase )
__a : str = get_pairs(_lowercase )
if not pairs:
return token
while True:
__a : Union[str, Any] = min(_lowercase , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(_lowercase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__a , __a : str = bigram
__a : str = []
__a : Optional[Any] = 0
while i < len(_lowercase ):
try:
__a : int = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__a : Tuple = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__a : str = tuple(_lowercase )
__a : Optional[int] = new_word
if len(_lowercase ) == 1:
break
else:
__a : List[Any] = get_pairs(_lowercase )
__a : Optional[Any] = ' '.join(_lowercase )
__a : Union[str, Any] = word
return word
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : Optional[int] = []
for token in re.findall(self.pat , _lowercase ):
__a : str = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(' ' ) )
return bpe_tokens
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
return self.decoder.get(_lowercase )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
__a : Dict = ''.join(_lowercase )
__a : str = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Union[str, Any] = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__a : Dict = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + '\n' )
__a : Tuple = 0
with open(_lowercase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
__a : Any = token_index
writer.write(' '.join(_lowercase ) + '\n' )
index += 1
return vocab_file, merge_file
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a : Any = [self.cls_token_id]
__a : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Dict = [self.sep_token_id]
__a : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=False , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a : Optional[int] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
__a : List[Any] = ' ' + text
return (text, kwargs)
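# --- Illustrative usage (added sketch) ---
# Byte-level BPE in action; the checkpoint name is an example and downloads
# the vocab/merges files on first use.
if __name__ == "__main__":
    tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    print(tokenizer.tokenize("Hello world"))  # ['Hello', 'Ġworld']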
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def __a ( self : Tuple ):
return len(self.sp_model )
def __a ( self : List[str] ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : List[str] , _lowercase : int ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ):
A = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def __a ( self : Dict , _lowercase : Tuple ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : str , _lowercase : Optional[int] ):
return self.sp_model.IdToPiece(_lowercase )
def __a ( self : Optional[int] , _lowercase : Optional[int] ):
A = self.sp_model.decode_pieces(_lowercase )
return out_string
def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
def A ( self : Optional[Any] , A_ : Optional[int] , A_ : Any )-> Union[str, Any]:
__UpperCamelCase = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowercase )
@rank_zero_only
def A ( self : Tuple , A_ : pl.Trainer , A_ : pl.LightningModule , A_ : str , A_ : Any=True )-> Optional[Any]:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
__UpperCamelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
__UpperCamelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__UpperCamelCase = od / "test_results.txt"
__UpperCamelCase = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__UpperCamelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
__UpperCamelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_lowercase )
generations_file.parent.mkdir(exist_ok=_lowercase )
with open(_lowercase , "a+" ) as writer:
for key in sorted(_lowercase ):
if key in ["log", "progress_bar", "preds"]:
continue
__UpperCamelCase = metrics[key]
if isinstance(_lowercase , torch.Tensor ):
__UpperCamelCase = val.item()
__UpperCamelCase = f"""{key}: {val:.6f}\n"""
writer.write(_lowercase )
if not save_generations:
return
if "preds" in metrics:
__UpperCamelCase = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(_lowercase )
@rank_zero_only
def A ( self : Any , A_ : Union[str, Any] , A_ : List[str] )-> int:
try:
__UpperCamelCase = pl_module.model.model.num_parameters()
except AttributeError:
__UpperCamelCase = pl_module.model.num_parameters()
__UpperCamelCase = count_trainable_parameters(_lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def A ( self : List[Any] , A_ : pl.Trainer , A_ : pl.LightningModule )-> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowercase , _lowercase , "test" )
@rank_zero_only
def A ( self : Tuple , A_ : pl.Trainer , A_ : Optional[int] )-> str:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
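# --- Illustrative usage (added sketch) ---
# Wiring the helpers above into a Lightning Trainer; "output/" is a
# placeholder directory, not taken from this file.
if __name__ == "__main__":
    callbacks = [
        get_checkpoint_callback("output/", metric="rouge2"),
        get_early_stopping_callback(metric="rouge2", patience=3),
        Seq2SeqLoggingCallback(),
    ]
    trainer = pl.Trainer(callbacks=callbacks, max_epochs=1)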
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
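# --- Illustrative usage (added sketch; the checkpoint name is an example) ---
if __name__ == "__main__":
    tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    print(tokenizer("hello world")["input_ids"])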
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after shortest-edge resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _lowercase )
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowercase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _lowercase )
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
pass
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
__a = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(_lowercase , return_tensors='pt' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(_lowercase , return_tensors='pt' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
        # Initialize two image processors: one with defaults, one with all transforms except padding disabled
__a = self.image_processing_class(**self.image_processor_dict )
__a = self.image_processing_class(do_resize=_lowercase , do_normalize=_lowercase , do_rescale=_lowercase )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__a = image_processing_a.pad(_lowercase , return_tensors='pt' )
__a = image_processing_a(_lowercase , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
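        # The two encodings agree because the second processor was constructed
        # with resize, rescale and normalization disabled, so padding to the
        # largest image in the batch is the only transform either call applies.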
@slow
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
# prepare image and target
__a = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__a = json.loads(f.read() )
__a = {'image_id': 3_9769, 'annotations': target}
# encode them
__a = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
__a = image_processing(images=_lowercase , annotations=_lowercase , return_tensors='pt' )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _lowercase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowercase , atol=1e-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowercase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowercase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowercase , atol=1e-3 ) )
# verify image_id
__a = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowercase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowercase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowercase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowercase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowercase ) )
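        # Note: the processor converts COCO's absolute (x, y, width, height)
        # boxes to normalized (center_x, center_y, width, height), which is why
        # every box coordinate checked above lies in [0, 1].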
@slow
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
# prepare image, target and masks_path
__a = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__a = json.loads(f.read() )
__a = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
__a = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__a = YolosImageProcessor(format='coco_panoptic' )
__a = image_processing(images=_lowercase , annotations=_lowercase , masks_path=_lowercase , return_tensors='pt' )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _lowercase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowercase , atol=1e-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowercase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowercase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowercase , atol=1e-3 ) )
# verify image_id
__a = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowercase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowercase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowercase ) )
# verify masks
__a = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _lowercase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowercase ) )
# verify size
__a = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowercase ) ) | 690 | 0 |
"""simple docstring"""
def z_function( input_str ) -> list[int]:
    """simple docstring"""
    z_result = [0 for i in range(len(input_str ) )]
    # initialize the interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when the current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if the new index's result extends the interval further right,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next( i , z_result , s ) -> bool:
    """simple docstring"""
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern( pattern , input_str ) -> int:
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if the value is greater than or equal to the length of the pattern,
        # this index is the starting position of a substring equal to the pattern
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
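    # Illustrative usage (values checked by hand): each z_result entry is the
    # length of the longest substring starting there that matches a prefix.
    print(z_function('abracadabra' ))  # [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    print(find_pattern('abr' , 'abracadabra' ))  # 2 (matches at indices 0 and 7)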
| 690 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive ( TaskTemplate ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
    question_column: str = '''question'''
    context_column: str = '''context'''
    answers_column: str = '''answers'''
@property
    def column_mapping ( self ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
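    # Usage sketch (the column names below are hypothetical):
    #   template = QuestionAnsweringExtractive(question_column='q', context_column='ctx', answers_column='ans')
    #   template.column_mapping  # {'q': 'question', 'ctx': 'context', 'ans': 'answers'}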
| 569 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
    def get_dummy_components( self : Dict ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vqvae': vae,
            'bert': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs( self : Union[str, Any] , device : Union[str, Any] , seed : int=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def test_inference_text2img( self : Any ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests ( unittest.TestCase ):
    def tearDown( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self : int , device : List[Any] , dtype : int=torch.floataa , seed : int=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def test_ldm_default_ddim( self : Union[str, Any] ):
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
        max_diff = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests ( unittest.TestCase ):
    def tearDown( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self : List[Any] , device : Optional[Any] , dtype : Tuple=torch.floataa , seed : Optional[Any]=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def test_ldm_default_ddim( self : List[str] ):
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
        max_diff = np.abs(expected_image - image ).max()
        assert max_diff < 1e-3
| 690 | 0 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.')
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})
    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1])
    new_doc = sorted(new_doc , key=lambda s: s["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f'{doc_list} has two \'overview\' docs which is not allowed.')
    overview_doc.extend(new_doc)
    # Return the sorted list with the overview first
    return overview_doc
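# Illustrative behavior of clean_doc_toc (a sketch, not executed here): given
#   [{"local": "b", "title": "Beta"}, {"local": "o", "title": "Overview"},
#    {"local": "a", "title": "Alpha"}]
# it returns the Overview entry first, then the rest sorted by title:
# Alpha, Beta.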
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC , encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8') as f:
                f.write(yaml.dump(content , allow_unicode=True))
        else:
            raise ValueError(
                'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.')
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC , encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8') as f:
                f.write(yaml.dump(content , allow_unicode=True))
        else:
            raise ValueError(
                'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.')
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
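# Typical invocations (path assumed, run from the repository root):
#   python utils/check_doc_toc.py                      # check only
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the toctree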
| 23 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 690 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE: List[str] = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe )-> None:
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr )-> list:
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main( )-> None:
    arr = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('Sorted List\n' )
    print(*arr )
if __name__ == "__main__":
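    # Illustrative run: sorts [10, 9, ..., 1] with one process per element.
    # Odd-even transposition needs n compare-exchange rounds, i.e. O(n)
    # parallel time and O(n^2) total work across processes.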
    main()
| 360 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
    """simple docstring"""
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
    """simple docstring"""
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
    """simple docstring"""
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
    """simple docstring"""
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
    """simple docstring"""
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
    """simple docstring"""
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
    """simple docstring"""
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
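# Illustrative torch.hub usage of the entry points above (checkpoint name is
# only an example):
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mlm = torch.hub.load("huggingface/transformers", "modelForMaskedLM", "bert-base-uncased")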
| 690 | 0 |
import functools
def min_distance_up_bottom( worda , wordb ):
    len_worda = len(worda)
    len_wordb = len(wordb)
    @functools.cache
    def min_distance( indexa , indexb ) -> int:
        # if the first word's index overflows - delete all remaining characters of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete all remaining characters of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb) , 1 + min_distance(indexa , indexb + 1) , diff + min_distance(indexa + 1 , indexb + 1) , )
    return min_distance(0 , 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
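    # Classic worked example: turning "intention" into "execution" takes five
    # single-character edits with unit costs, so this prints 5.
    print(min_distance_up_bottom('intention' , 'execution' ))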
| 659 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
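        # Note on ordering: `preprocess` applies resize (shortest edge) ->
        # center crop -> rescale (1/255) -> normalize (ImageNet standard
        # mean/std), each step gated by its corresponding `do_*` flag.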
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 690 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config( model_name ) -> List[str]:
    """simple docstring"""
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = """huggingface/label-files"""
    if "o365" in model_name:
        num_labels = 366
        filename = """object365-id2label.json"""
    else:
        num_labels = 91
        filename = """coco-detection-id2label.json"""
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys( config ) -> Optional[int]:
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , src , dest ) -> int:
    """simple docstring"""
    val = dct.pop(src )
    dct[dest] = val
def read_in_swin_q_k_v( state_dict , backbone_config ) -> Any:
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ) -> Optional[Any]:
    """simple docstring"""
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
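# Shape sanity check for the splits above (illustrative): with h = hidden size,
# in_proj_weight is (3*h, h) and is cut into three (h, h) blocks for the query,
# key and value projections; in_proj_bias is (3*h,) and is split the same way.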
def prepare_img( ) -> List[str]:
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ) -> List[str]:
    """simple docstring"""
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" )
    else:
        raise ValueError(f'''Model name {model_name} not supported''' )
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    # print name and shape of every parameter in the original state dict
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict[key.replace("""transformer.decoder""" , """model.decoder""" )] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict["""model.""" + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict[key.replace("""transformer""" , """model""" )] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format="""coco_detection""" )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values.to(device ) )
# verify logits
print("""Logits:""" , outputs.logits[0, :3, :3] )
print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
        expected_boxes = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
        expected_boxes = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1E-4 )
print("""Everything ok!""" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
# Push to hub
if push_to_hub:
print("""Pushing model and processor to hub...""" )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
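# Example invocation (script filename assumed):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large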
| 15 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product = "laptop" ) -> DataFrame:
"""simple docstring"""
    url = f'https://www.amazon.in/laptop/s?k={product}'
    header = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
                product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
                product_rating = 'Not available'
try:
                product_mrp = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
                product_mrp = ''
try:
                discount = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
A = float('nan' )
except AttributeError:
pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame['Current Price of the product'] > data_frame['MRP of the product'], 'MRP of the product'
        ] = ' '
        data_frame.loc[
            data_frame['Current Price of the product'] > data_frame['MRP of the product'], 'Discount'
        ] = ' '
        data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
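# A minimal usage sketch (live contents depend on what Amazon returns for the
# query at request time, so only the column layout is stable):
#   df = get_amazon_product_data("laptop")
#   print(df[["Product Title", "Current Price of the product", "Discount"]].head())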
| 690 | 0 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """simple docstring"""
    def __init__(self, dim, num_attention_heads, attention_head_dim, dropout=0.0, cross_attention_dim=None, activation_fn="geglu", num_embeds_ada_norm=None, attention_bias=False, only_cross_attention=False, double_self_attention=False, upcast_attention=False, norm_elementwise_affine=True, norm_type="layer_norm", final_dropout=False, ):
        """simple docstring"""
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.")
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size, dim):
        """simple docstring"""
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None, ):
        """simple docstring"""
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype)
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.")
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim, )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
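# A minimal smoke test for the block above (all numbers are illustrative; the
# constructor argument names follow the definition above, nothing else is assumed):
#
#   block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#   sample = torch.randn(2, 77, 64)   # (batch, sequence, dim)
#   out = block(sample)               # same shape as the input
#   assert out.shape == sample.shape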
class FeedForward(nn.Module):
    """simple docstring"""
    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False, ):
        """simple docstring"""
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))
    def forward(self, hidden_states):
        """simple docstring"""
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    """simple docstring"""
    def __init__(self, dim_in, dim_out, approximate="none"):
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate
    def gelu(self, gate):
        """simple docstring"""
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
    def forward(self, hidden_states):
        """simple docstring"""
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    """simple docstring"""
    def __init__(self, dim_in, dim_out):
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)
    def gelu(self, gate):
        """simple docstring"""
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
    def forward(self, hidden_states):
        """simple docstring"""
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
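# GEGLU in one line: the projection doubles the width, the result is split into a
# value half and a gate half, and the gate goes through GELU. Illustrative sizes only:
#
#   geglu = GEGLU(dim_in=8, dim_out=16)
#   y = geglu(torch.randn(2, 8))   # proj -> (2, 32), chunk -> two (2, 16) halves
#   assert y.shape == (2, 16)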
class ApproximateGELU(nn.Module):
    """simple docstring"""
    def __init__(self, dim_in, dim_out):
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
    def forward(self, x):
        """simple docstring"""
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    """simple docstring"""
    def __init__(self, embedding_dim, num_embeddings):
        """simple docstring"""
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)
    def forward(self, x, timestep):
        """simple docstring"""
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
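# Adaptive layer norm in a nutshell: a timestep embedding is mapped to a
# (scale, shift) pair that modulates a parameter-free LayerNorm, i.e.
# out = norm(x) * (1 + scale) + shift. Illustrative numbers only:
#
#   ada_norm = AdaLayerNorm(embedding_dim=64, num_embeddings=1000)
#   out = ada_norm(torch.randn(2, 10, 64), torch.tensor(5))  # broadcasts over x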
class AdaLayerNormZero(nn.Module):
    """simple docstring"""
    def __init__(self, embedding_dim, num_embeddings):
        """simple docstring"""
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        """simple docstring"""
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """simple docstring"""
    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        """simple docstring"""
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)
    def forward(self, x, emb):
        """simple docstring"""
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 483 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase ):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase ):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = DonutImageProcessingTester(self )
    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, 'do_resize' ) )
        self.assertTrue(hasattr(image_processing, 'size' ) )
        self.assertTrue(hasattr(image_processing, 'do_thumbnail' ) )
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis' ) )
        self.assertTrue(hasattr(image_processing, 'do_pad' ) )
        self.assertTrue(hasattr(image_processing, 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing, 'image_mean' ) )
        self.assertTrue(hasattr(image_processing, 'image_std' ) )
    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42} )
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) )
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42} )
    def test_batch_feature(self ):
        pass
    @is_flaky()
    def test_call_pil(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
    @is_flaky()
    def test_call_numpy(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
    @is_flaky()
    def test_call_pytorch(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
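# The three tests above only differ in the input container (PIL / NumPy / PyTorch).
# One hedged way to run just this module, assuming the standard transformers layout:
#   python -m pytest tests/models/donut/test_image_processing_donut.py -k "call"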
| 690 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase ):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy' ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_text2img(self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 373 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.float32 )
    def __len__(self ):
        return self.length
    def __getitem__(self, i ):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module ):
    def __init__(self, a=0, b=0, double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward(self, x=None ):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module ):
    def __init__(self, a=0, b=0, double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward(self, x=None ):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size=16 ):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files )
    label_list = datasets['train'].unique('label' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length' )
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'], )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt' )
        return tokenizer.pad(examples, padding='longest', return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1 )
    return train_dataloader, eval_dataloader
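# A minimal usage sketch (the Accelerator import is the standard accelerate entry
# point; everything else follows the helper defined above):
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = mocked_dataloaders(accelerator)
#   for batch in train_dl:   # dicts of input_ids / attention_mask / labels tensors
#       break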
| 690 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
SCREAMING_SNAKE_CASE_:Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
SCREAMING_SNAKE_CASE_:Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
SCREAMING_SNAKE_CASE_:Dict = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
SCREAMING_SNAKE_CASE_:Tuple = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
SCREAMING_SNAKE_CASE_:Tuple = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
SCREAMING_SNAKE_CASE_:Dict = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
SCREAMING_SNAKE_CASE_:int = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
SCREAMING_SNAKE_CASE_:Tuple = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
SCREAMING_SNAKE_CASE_:Optional[int] = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
SCREAMING_SNAKE_CASE_:int = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
SCREAMING_SNAKE_CASE_:Tuple = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
SCREAMING_SNAKE_CASE_:Any = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
SCREAMING_SNAKE_CASE_:Tuple = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
SCREAMING_SNAKE_CASE_:Dict = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
SCREAMING_SNAKE_CASE_:Dict = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(F"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
        )
        print(F"""{mod.modelId} has passed successfully!!!""")
| 662 |
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int] ) -> list[int]:  # This function is recursive
    """simple docstring"""
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
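# Worked example (this is the classic doctest for the algorithm; the >= checks above
# mean it returns a longest *non-decreasing* subsequence):
#   longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
#   -> [10, 22, 33, 41, 60, 80]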
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        '''simple docstring'''
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels ):
        '''simple docstring'''
        model = TFRegNetModel(config=config )
        result = model(pixel_values, training=False )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config )
        result = model(pixel_values, labels=labels, training=False )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = TFRegNetModelTester(self )
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = model_class(_lowercase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Union[str, Any] = [*signature.parameters.keys()]
__a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowercase )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
__a : Tuple = model_class(_lowercase )
__a : List[str] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__a : Optional[int] = layer_type
__a : Optional[int] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Optional[Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int={} ):
__a : List[Any] = model(_lowercase , return_dict=_lowercase , **_lowercase )
__a : Any = model(_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
if isinstance(_lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ):
recursive_check(_lowercase , _lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowercase , _lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(_lowercase , _lowercase )
for model_class in self.all_model_classes:
__a : Union[str, Any] = model_class(_lowercase )
__a : Tuple = self._prepare_for_class(_lowercase , _lowercase )
__a : List[str] = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
__a : Optional[int] = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__a : List[Any] = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
__a : Any = self._prepare_for_class(_lowercase , _lowercase )
__a : Tuple = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'output_hidden_states': True} )
__a : Any = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__a : Any = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'output_hidden_states': True} )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Optional[Any] = TFRegNetModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        '''simple docstring'''
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self ):
        '''simple docstring'''
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf' )
        # forward pass
        outputs = model(**inputs, training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4 )
| 47 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector ) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector ) -> VectorOut:
    """simple docstring"""
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2 ) ) ** (1 / 2)
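# Worked example: for the 3-4-5 right triangle both implementations agree.
#   euclidean_distance([0, 0], [3, 4])        -> 5.0
#   euclidean_distance_no_np([0, 0], [3, 4])  -> 5.0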
if __name__ == "__main__":
    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
| 690 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def A ( self : int )-> Dict:
__UpperCamelCase = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(_lowercase , _lowercase )
def A ( self : str )-> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = Path(_lowercase ) / "preprocessor_config.json"
__UpperCamelCase = Path(_lowercase ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
json.dump({"model_type": "clip"} , open(_lowercase , "w" ) )
__UpperCamelCase = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def A ( self : Optional[Any] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = Path(_lowercase ) / "preprocessor_config.json"
__UpperCamelCase = Path(_lowercase ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
json.dump({"model_type": "clip"} , open(_lowercase , "w" ) )
__UpperCamelCase = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def A ( self : Dict )-> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__UpperCamelCase = Path(_lowercase ) / "preprocessor_config.json"
__UpperCamelCase = Path(_lowercase ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
json.dump({"model_type": "clip"} , open(_lowercase , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__UpperCamelCase = AutoImageProcessor.from_pretrained(_lowercase ).to_dict()
config_dict.pop("image_processor_type" )
__UpperCamelCase = CLIPImageProcessor(**_lowercase )
# save in new folder
model_config.save_pretrained(_lowercase )
config.save_pretrained(_lowercase )
__UpperCamelCase = AutoImageProcessor.from_pretrained(_lowercase )
# make sure private variable is not incorrectly saved
__UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(_lowercase , _lowercase )
def A ( self : Optional[int] )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = Path(_lowercase ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
__UpperCamelCase = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def A ( self : str )-> Tuple:
with self.assertRaisesRegex(
_lowercase , "clip-base is not a local folder and is not a valid model identifier" ):
__UpperCamelCase = AutoImageProcessor.from_pretrained("clip-base" )
def A ( self : Union[str, Any] )-> Optional[int]:
with self.assertRaisesRegex(
_lowercase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__UpperCamelCase = AutoImageProcessor.from_pretrained(_lowercase , revision="aaaaaa" )
def A ( self : Any )-> int:
with self.assertRaisesRegex(
_lowercase , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
__UpperCamelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def A ( self : List[Any] )-> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowercase ):
__UpperCamelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
__UpperCamelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=_lowercase )
__UpperCamelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
__UpperCamelCase = AutoImageProcessor.from_pretrained(_lowercase , trust_remote_code=_lowercase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def A ( self : Optional[Any] )-> Optional[Any]:
try:
AutoConfig.register("custom" , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoImageProcessor.register(_lowercase , _lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = Path(_lowercase ) / "preprocessor_config.json"
__UpperCamelCase = Path(_lowercase ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
json.dump({"model_type": "clip"} , open(_lowercase , "w" ) )
__UpperCamelCase = CustomImageProcessor.from_pretrained(_lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
__UpperCamelCase = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
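    # The registration pattern exercised above, as a standalone sketch (the class
    # names are the test fixtures imported at the top of this file; the directory
    # argument is hypothetical):
    #   AutoConfig.register("custom", CustomConfig)
    #   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    #   processor = CustomImageProcessor.from_pretrained(some_local_dir)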
def A ( self : Union[str, Any] )-> Optional[Any]:
        class NewImageProcessor(CustomImageProcessor ):
            """simple docstring"""
            is_local = True
try:
AutoConfig.register("custom" , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# If remote code is not set, the default is to use local
__UpperCamelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__UpperCamelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__UpperCamelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(_lowercase , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 505 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ):
    """simple docstring"""
    output_path.parent.mkdir(parents=True, exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False ):
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype ).to(device )
    output_path = Path(output_path )
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        'A sample prompt', padding='max_length', max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32 )), output_path=output_path / 'text_encoder' / 'model.onnx', ordered_input_names=['input_ids'], output_names=['last_hidden_state', 'pooler_output'], dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        }, opset=opset, )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size ).to(device=device, dtype=dtype ),
            torch.randn(2 ).to(device=device, dtype=dtype ),
            torch.randn(2, num_tokens, text_hidden_size ).to(device=device, dtype=dtype ),
            False,
        ), output_path=unet_path, ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'], output_names=['out_sample'], dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        }, opset=opset, use_external_data_format=True, )
    unet_model_path = str(unet_path.absolute().as_posix() )
    unet_dir = os.path.dirname(unet_model_path )
    unet = onnx.load(unet_model_path )
    # clean up existing tensor files
    shutil.rmtree(unet_dir )
    os.mkdir(unet_dir )
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location='weights.pb', convert_attribute=False, )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict )[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size ).to(device=device, dtype=dtype ),
            False,
        ), output_path=output_path / 'vae_encoder' / 'model.onnx', ordered_input_names=['sample', 'return_dict'], output_names=['latent_sample'], dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        }, opset=opset, )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size ).to(device=device, dtype=dtype ),
            False,
        ), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        }, opset=opset, )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(
                    1, clip_num_channels, clip_image_size, clip_image_size, ).to(device=device, dtype=dtype ),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels ).to(device=device, dtype=dtype ),
            ), output_path=output_path / 'safety_checker' / 'model.onnx', ordered_input_names=['clip_input', 'images'], output_names=['out_images', 'has_nsfw_concepts'], dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            }, opset=opset, )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None, )
    onnx_pipeline.save_pretrained(output_path )
    print('ONNX pipeline saved to', output_path )
    del pipeline
    del onnx_pipeline
    onnx_pipeline = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider='CPUExecutionProvider' )
    print('ONNX pipeline is loadable' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
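# Example invocation (the output directory is a hypothetical local path):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd-onnx --opset 14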
| 690 | 0 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train and eval `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
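
# --- Illustrative sketch (not part of the original example) ---
# How `find_executable_batch_size` behaves in isolation: it retries the
# decorated function with a halved batch size when the raised error looks like
# a CUDA out-of-memory failure. The threshold 32 is an arbitrary stand-in for
# "fits in memory", chosen only for this sketch; the message-based OOM
# detection below is an assumption about accelerate's current heuristics.
def _demo_find_executable_batch_size():
    @find_executable_batch_size(starting_batch_size=128)
    def try_batch(batch_size):
        if batch_size > 32:
            # accelerate inspects RuntimeError messages to decide whether to
            # halve the batch size and retry
            raise RuntimeError('CUDA out of memory. (simulated)')
        return batch_size

    return try_batch()  # called with no arguments; the decorator injects `batch_size` -> 32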
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 448 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """One worker in the parallel odd-even transposition sort."""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sorts `arr` with one process per element, each swapping with its neighbors."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
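
# --- Illustrative reference (not part of the original script) ---
# A single-process odd-even transposition sort, handy for checking the
# parallel version above; this sequential form is the textbook formulation.
def odd_even_transposition_sequential(arr):
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr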
def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
| 690 | 0 |
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_lowerCAmelCase = get_logger(__name__)
class _UpperCAmelCase :
a = '''dummy_data'''
a = '''datasets'''
a = False
def __init__( self , a__ , a__ , a__ , a__ = None , a__ = False , a__ = True , a__ = None , ):
A_ : int = 0
A_ : Any = dataset_name
A_ : Tuple = cache_dir
A_ : str = use_local_dummy_data
A_ : Any = config
# download_callbacks take a single url as input
A_ : List[Any] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
A_ : int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
A_ : List[str] = str(_lowercase )
# to be downloaded
A_ : Optional[int] = None
A_ : Dict = None
@property
def _lowerCamelCase ( self ):
if self._dummy_file is None:
A_ : Tuple = self.download_dummy_data()
return self._dummy_file
@property
def _lowerCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def _lowerCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def _lowerCamelCase ( self ):
A_ : Any = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
A_ : Dict = cached_path(
_lowercase , cache_dir=self.cache_dir , extract_compressed_file=_lowercase , force_extract=_lowercase )
return os.path.join(_lowercase , self.dummy_file_name )
@property
def _lowerCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _lowerCamelCase ( self ):
if self._bucket_url is None:
A_ : Optional[int] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def _lowerCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def _lowerCamelCase ( self , a__ , *a__ ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
A_ : str = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
A_ : int = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_lowercase , _lowercase ):
return self.create_dummy_data_dict(_lowercase , _lowercase )
elif isinstance(_lowercase , (list, tuple) ):
return self.create_dummy_data_list(_lowercase , _lowercase )
else:
return self.create_dummy_data_single(_lowercase , _lowercase )
def _lowerCamelCase ( self , a__ , *a__ ):
return self.download_and_extract(_lowercase )
def _lowerCamelCase ( self , a__ , a__ ):
return self.download_and_extract(_lowercase )
def _lowerCamelCase ( self , a__ , *a__ , **a__ ):
return path
def _lowerCamelCase ( self ):
return {}
def _lowerCamelCase ( self , a__ , a__ ):
A_ : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_lowercase , _lowercase ):
for single_url in single_urls:
download_callback(_lowercase )
else:
A_ : int = single_urls
download_callback(_lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_lowercase , _lowercase ):
A_ : Dict = [os.path.join(_lowercase , urllib.parse.quote_plus(Path(_lowercase ).name ) ) for x in single_urls]
else:
A_ : List[str] = single_urls
A_ : Optional[Any] = os.path.join(_lowercase , urllib.parse.quote_plus(Path(_lowercase ).name ) )
A_ : Union[str, Any] = value
# make sure that values are unique
if all(isinstance(_lowercase , _lowercase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
A_ : Union[str, Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
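
    # --- Illustrative note (not part of the original class) ---
    # The mapping above localizes each URL by URL-quoting its final path
    # component, so query strings survive as part of the file name, e.g.:
    #   urllib.parse.quote_plus(Path('https://host/a/b.txt?x=1').name)
    #   -> 'b.txt%3Fx%3D1'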
def _lowerCamelCase ( self , a__ , a__ ):
A_ : List[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
A_ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , _lowercase ) ) for url in data_url )
A_ : Tuple = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
A_ : Tuple = [data_url[0]] * len(_lowercase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
A_ : str = os.path.join(_lowercase , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(_lowercase )
return dummy_data_list
def _lowerCamelCase ( self , a__ , a__ ):
for download_callback in self.download_callbacks:
download_callback(_lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
A_ : Union[str, Any] = os.path.join(_lowercase , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(_lowercase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self , a__ ):
def _iter_archive_members(a__ ):
# this preserves the order of the members inside the ZIP archive
A_ : Optional[Any] = Path(self.dummy_file ).parent
A_ : Optional[int] = path.relative_to(_lowercase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
A_ : Tuple = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_lowercase )
A_ : Dict = Path(_lowercase )
A_ : str = _iter_archive_members(_lowercase ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(_lowercase ).as_posix(), file_path.open("""rb""" )
def _lowerCamelCase ( self , a__ ):
if not isinstance(_lowercase , _lowercase ):
A_ : str = [paths]
for path in paths:
if os.path.isfile(_lowercase ):
if os.path.basename(_lowercase ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_lowercase ):
if os.path.basename(_lowercase ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(_lowercase ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(_lowercase , _lowercase )
| 569 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    """Visualize the polynomial regression fit."""
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
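
# --- Illustrative comparison (not part of the original script) ---
# A quick side-by-side with a plain linear fit on the same data; `lin_reg`
# is local to this sketch and uses only names defined above.
lin_reg = LinearRegression()
lin_reg.fit(X, y)
print("linear prediction for level 5.5:", lin_reg.predict([[5.5]]))
print("degree-4 prediction for level 5.5:", pol_reg.predict(poly_reg.fit_transform([[5.5]])))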
| 690 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
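

# --- Illustrative usage (not part of the original module) ---
# Builds a three-node tree and sums it through the iterator; the names below
# are local to this sketch.
def _demo_tree_sum() -> int:
    root = Node(10)
    root.left = Node(5)
    root.right = Node(7)
    return next(iter(BinaryTreeNodeSum(root)))  # 10 + 5 + 7 == 22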
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 691 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 691 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 691 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =3
_SCREAMING_SNAKE_CASE =(32, 32)
_SCREAMING_SNAKE_CASE =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
@property
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_a )
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
def extract(*_a : List[Any] , **_a : Dict ):
class A__ :
def __init__( self : str ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =torch.ones([0] )
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
self.pixel_values.to(_a )
return self
return Out()
return extract
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.dummy_cond_unet
_SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_a )
_SCREAMING_SNAKE_CASE =self.dummy_vae
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_SCREAMING_SNAKE_CASE =77
_SCREAMING_SNAKE_CASE =self.dummy_image.to(_a )
_SCREAMING_SNAKE_CASE =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_SCREAMING_SNAKE_CASE =AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_SCREAMING_SNAKE_CASE =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
_SCREAMING_SNAKE_CASE =alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE ='''A painting of a squirrel eating a burger'''
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_a , )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_a , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_SCREAMING_SNAKE_CASE =np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
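
    # --- Illustrative note (not part of the original tests) ---
    # The checks above follow the usual diffusers test pattern: slice a 3x3
    # corner from the last channel of the generated image and compare it
    # elementwise against hard-coded reference values:
    #   corner = image[0, -3:, -3:, -1]
    #   assert np.abs(corner.flatten() - expected_slice).max() < 5e-3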
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_cond_unet
_SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_a )
_SCREAMING_SNAKE_CASE =self.dummy_vae
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_SCREAMING_SNAKE_CASE =77
_SCREAMING_SNAKE_CASE =self.dummy_image.to(_a )
# put models in fp16
_SCREAMING_SNAKE_CASE =unet.half()
_SCREAMING_SNAKE_CASE =vae.half()
_SCREAMING_SNAKE_CASE =bert.half()
# make sure here that pndm scheduler skips prk
_SCREAMING_SNAKE_CASE =AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_SCREAMING_SNAKE_CASE =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
_SCREAMING_SNAKE_CASE =alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE ='''A painting of a squirrel eating a burger'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe(
[prompt] , generator=_a , num_inference_steps=2 , output_type='''np''' , image=_a , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
_SCREAMING_SNAKE_CASE =init_image.resize((760, 504) )
_SCREAMING_SNAKE_CASE ='''BAAI/AltDiffusion'''
_SCREAMING_SNAKE_CASE =AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE ='''A fantasy landscape, trending on artstation'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
_SCREAMING_SNAKE_CASE =image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_SCREAMING_SNAKE_CASE =np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_SCREAMING_SNAKE_CASE =init_image.resize((768, 512) )
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
_SCREAMING_SNAKE_CASE ='''BAAI/AltDiffusion'''
_SCREAMING_SNAKE_CASE =AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE ='''A fantasy landscape, trending on artstation'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1E-2
| 691 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
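
# --- Illustrative usage (not part of the original benchmark) ---
# A tiny run of `generate_examples` on a two-column schema; the feature names
# are placeholders chosen for this sketch.
def _demo_generate_examples():
    features = datasets.Features(
        {'text': datasets.Value('string'), 'score': datasets.Value('float32')}
    )
    return generate_examples(features, num_examples=2)  # [(0, {...}), (1, {...})]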
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 691 | 1 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Build and simulate a quantum full adder; returns the measured counts."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    quantum_r = qiskit.QuantumRegister(4, 'qr')
    classical_r = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(quantum_r, classical_r)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], classical_r)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
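
# --- Illustrative check (not part of the original script) ---
# For classical inputs 1 + 1 + 1, the sum bit and carry bit are both 1, so
# the simulator is expected to put all shots in the state '11':
#   quantum_full_adder(1, 1, 1)  ->  {'11': 1000}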
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""") | 691 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
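
# --- Illustrative sketch (not part of the original model) ---
# The patience rule implemented above, in isolation: inference stops once
# `patience` consecutive layers produce the same prediction, i.e. the last
# patience + 1 per-layer predictions all agree. Names are local to this sketch.
def _pabee_should_stop(layer_predictions, patience):
    if len(layer_predictions) <= patience:
        return False
    last = layer_predictions[-1]
    return all(p == last for p in layer_predictions[-(patience + 1):])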
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
        return outputs
| 691 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode('UTF-8')
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' '):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = '\n'.join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith('.zip') or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
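
# --- Illustrative check (not part of the original script) ---
# The selection rule above keeps a warning block when any target name occurs
# as ": <Target>: " inside it; the same rule in isolation:
def _matches_targets(warning, targets):
    return any(f": {x}: " in warning for x in targets)

# _matches_targets("foo.py:3: DeprecationWarning: old API", ["DeprecationWarning"]) -> True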
if __name__ == "__main__":
    def list_str(values):
        return values.split(',')

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 691 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_table_transformer'] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 691 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] , _a : List[str] , _a : Any ) -> Dict:
"""simple docstring"""
return f"gaussian_noise_s={seed}_shape={'_'.join([str(_a ) for s in shape] )}.npy"
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any]=0 , _a : Union[str, Any]=(4, 4, 64, 64) , _a : List[Any]=False ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =jnp.bfloataa if fpaa else jnp.floataa
_SCREAMING_SNAKE_CASE =jnp.array(load_hf_numpy(self.get_file_format(_a , _a ) ) , dtype=_a )
return image
def __UpperCamelCase ( self : Dict , _a : Tuple=False , _a : Tuple="CompVis/stable-diffusion-v1-4" ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =jnp.bfloataa if fpaa else jnp.floataa
_SCREAMING_SNAKE_CASE ='''bf16''' if fpaa else None
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =FlaxUNetaDConditionModel.from_pretrained(
_a , subfolder='''unet''' , dtype=_a , revision=_a )
return model, params
def __UpperCamelCase ( self : Dict , _a : List[str]=0 , _a : str=(4, 77, 768) , _a : str=False ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =jnp.bfloataa if fpaa else jnp.floataa
_SCREAMING_SNAKE_CASE =jnp.array(load_hf_numpy(self.get_file_format(_a , _a ) ) , dtype=_a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[17, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1000, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : int , _a : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_a )
_SCREAMING_SNAKE_CASE =self.get_latents(_a , fpaa=_a )
_SCREAMING_SNAKE_CASE =self.get_encoder_hidden_states(_a , fpaa=_a )
_SCREAMING_SNAKE_CASE =model.apply(
{'''params''': params} , _a , jnp.array(_a , dtype=jnp.intaa ) , encoder_hidden_states=_a , ).sample
assert sample.shape == latents.shape
_SCREAMING_SNAKE_CASE =jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE =jnp.array(_a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(_a , _a , atol=1E-2 )
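
    # --- Illustrative note (not part of the original tests) ---
    # bfloat16 keeps only ~8 mantissa bits, so the comparison against float16
    # PyTorch reference values uses a loose atol=1e-2 rather than an exact
    # elementwise match.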
@parameterized.expand(
[
# fmt: off
[83, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[17, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1000, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def __UpperCamelCase ( self : Optional[Any] , _a : int , _a : List[Any] , _a : Any ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_a )
_SCREAMING_SNAKE_CASE =self.get_latents(_a , shape=(4, 4, 96, 96) , fpaa=_a )
_SCREAMING_SNAKE_CASE =self.get_encoder_hidden_states(_a , shape=(4, 77, 1024) , fpaa=_a )
_SCREAMING_SNAKE_CASE =model.apply(
{'''params''': params} , _a , jnp.array(_a , dtype=jnp.intaa ) , encoder_hidden_states=_a , ).sample
assert sample.shape == latents.shape
_SCREAMING_SNAKE_CASE =jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE =jnp.array(_a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(_a, _a, atol=1E-2)
| 691 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] ) | 691 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : int = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Tuple = 12_80_22
snake_case_ : str = 12_80_28
@require_sentencepiece
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = MaMaaaTokenizer
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = True
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =Path(self.tmpdirname )
save_json(_a , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_a , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
_SCREAMING_SNAKE_CASE =MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : int , **_a : Tuple ) -> int:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Any , _a : List[Any] ) -> Tuple:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''</s>'''
_SCREAMING_SNAKE_CASE =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(_a ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [2, 3, 4, 5, 6] , )
_SCREAMING_SNAKE_CASE =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
_SCREAMING_SNAKE_CASE =tokenizer.convert_tokens_to_string(_a )
self.assertEqual(_a , '''This is a test''' )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={'''input_ids''': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
UpperCAmelCase = "facebook/m2m100_418M"
UpperCAmelCase = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
UpperCAmelCase = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
UpperCAmelCase = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __UpperCamelCase ( cls : Any ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
_SCREAMING_SNAKE_CASE =1
return cls
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 12_8063 )
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer.get_vocab()
self.assertEqual(len(_a ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , _a )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''en'''
_SCREAMING_SNAKE_CASE =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _a )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
self.assertIn(_a , self.tokenizer.all_special_ids )
# fmt: off
_SCREAMING_SNAKE_CASE =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
_SCREAMING_SNAKE_CASE =self.tokenizer.decode(_a , skip_special_tokens=_a )
_SCREAMING_SNAKE_CASE =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
self.assertNotIn(self.tokenizer.eos_token , _a )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =MaMaaaTokenizer.from_pretrained(_a )
self.assertDictEqual(new_tok.lang_token_to_id , _a )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''en'''
_SCREAMING_SNAKE_CASE ='''fr'''
_SCREAMING_SNAKE_CASE =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_a , return_tensors='''pt''' )
_SCREAMING_SNAKE_CASE =shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_SCREAMING_SNAKE_CASE =batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_SCREAMING_SNAKE_CASE ='''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_SCREAMING_SNAKE_CASE ='''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(_a ) , {
# en_XX, A, test, EOS
'''input_ids''': [[12_8022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 12_8006,
} , ) | 691 |
import requests
from bsa import BeautifulSoup
def lowerCamelCase( a__ = "https://www.worldometers.info/coronavirus"):
_SCREAMING_SNAKE_CASE =BeautifulSoup(requests.get(a__).text ,'''html.parser''')
_SCREAMING_SNAKE_CASE =soup.findAll('''h1''')
_SCREAMING_SNAKE_CASE =soup.findAll('''div''' ,{'''class''': '''maincounter-number'''})
keys += soup.findAll('''span''' ,{'''class''': '''panel-title'''})
values += soup.findAll('''div''' ,{'''class''': '''number-table-main'''})
return {key.text.strip(): value.text.strip() for key, value in zip(a__ ,a__)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""") | 691 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
snake_case_ : Dict = logging.get_logger(__name__)
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = ["audio_values", "audio_mask"]
def __init__( self : Optional[int] , _a : Tuple=2048 , _a : Optional[Any]=1 , _a : Union[str, Any]=[16, 16] , _a : str=128 , _a : int=4_4100 , _a : Any=86 , _a : Union[str, Any]=2048 , _a : List[str]=0.0 , **_a : Dict , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , )
_SCREAMING_SNAKE_CASE =spectrogram_length
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =feature_size // self.patch_size[1]
_SCREAMING_SNAKE_CASE =n_fft
_SCREAMING_SNAKE_CASE =sampling_rate // hop_length_to_sampling_rate
_SCREAMING_SNAKE_CASE =sampling_rate
_SCREAMING_SNAKE_CASE =padding_value
_SCREAMING_SNAKE_CASE =mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=_a , norm='''slaney''' , mel_scale='''slaney''' , ).T
def __UpperCamelCase ( self : Dict , _a : np.array ) -> np.ndarray:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =spectrogram(
_a , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
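        # drop the last frame, then shift and rescale the dB values into the range [-1, 1]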
_SCREAMING_SNAKE_CASE =log_spec[:, :-1]
_SCREAMING_SNAKE_CASE =log_spec - 20.0
_SCREAMING_SNAKE_CASE =np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Optional[int] , _a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _a : Optional[Union[str, TensorType]] = None , _a : Optional[bool] = True , _a : Optional[int] = None , _a : bool = False , _a : bool = False , **_a : List[str] , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
f" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
_SCREAMING_SNAKE_CASE =isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
_SCREAMING_SNAKE_CASE =is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_SCREAMING_SNAKE_CASE =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
_SCREAMING_SNAKE_CASE =np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_SCREAMING_SNAKE_CASE =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_SCREAMING_SNAKE_CASE =[np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_SCREAMING_SNAKE_CASE =[
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , _a ):
_SCREAMING_SNAKE_CASE =[np.asarray(_a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_SCREAMING_SNAKE_CASE =max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_SCREAMING_SNAKE_CASE =[
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_SCREAMING_SNAKE_CASE =np.array(_a ).astype(np.floataa )
# convert into correct format for padding
_SCREAMING_SNAKE_CASE =max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_SCREAMING_SNAKE_CASE =np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_SCREAMING_SNAKE_CASE =padded_audio_features * self.padding_value
for i in range(len(_a ) ):
_SCREAMING_SNAKE_CASE =audio_features[i]
_SCREAMING_SNAKE_CASE =feature
# return as BatchFeature
if return_attention_mask:
_SCREAMING_SNAKE_CASE ={'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
_SCREAMING_SNAKE_CASE ={'''audio_values''': padded_audio_features}
_SCREAMING_SNAKE_CASE =BatchFeature(data=_a , tensor_type=_a )
return encoded_inputs | 691 |
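# Single-bit helpers: set, clear, flip, test, and read the bit of `number` at `position`.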
def lowerCamelCase( a__ ,a__):
return number | (1 << position)
def lowerCamelCase( a__ ,a__):
return number & ~(1 << position)
def lowerCamelCase( a__ ,a__):
return number ^ (1 << position)
def lowerCamelCase( a__ ,a__):
return ((number >> position) & 1) == 1
def lowerCamelCase( a__ ,a__):
return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A__ ( unittest.TestCase ):
@property
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_uncond_unet
_SCREAMING_SNAKE_CASE =PNDMScheduler()
_SCREAMING_SNAKE_CASE =PNDMPipeline(unet=_a , scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =pndm(generator=_a , num_inference_steps=20 , output_type='''numpy''' ).images
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =pndm(generator=_a , num_inference_steps=20 , output_type='''numpy''' , return_dict=_a )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_SCREAMING_SNAKE_CASE =np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''google/ddpm-cifar10-32'''
_SCREAMING_SNAKE_CASE =UNetaDModel.from_pretrained(_a )
_SCREAMING_SNAKE_CASE =PNDMScheduler()
_SCREAMING_SNAKE_CASE =PNDMPipeline(unet=_a , scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =pndm(generator=_a , output_type='''numpy''' ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_SCREAMING_SNAKE_CASE =np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 691 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCamelCase ( self : Optional[int] , _a : bool ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
            len(_a ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for the doc-token-related keys in the dictionary. | 691 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
snake_case_ : str = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
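# Remap the original SAM checkpoint keys to the Hugging Face SamModel layout.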
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE ={}
state_dict.pop('''pixel_mean''' ,a__)
state_dict.pop('''pixel_std''' ,a__)
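    # pattern for the per-mask output hypernetwork MLPs; group 2 captures the inner layer index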
_SCREAMING_SNAKE_CASE =R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_SCREAMING_SNAKE_CASE =key.replace(a__ ,a__)
if re.match(a__ ,a__):
_SCREAMING_SNAKE_CASE =int(re.match(a__ ,a__).group(2))
if layer_nb == 0:
_SCREAMING_SNAKE_CASE =key.replace('''layers.0''' ,'''proj_in''')
elif layer_nb == 1:
_SCREAMING_SNAKE_CASE =key.replace('''layers.1''' ,'''layers.0''')
elif layer_nb == 2:
_SCREAMING_SNAKE_CASE =key.replace('''layers.2''' ,'''proj_out''')
_SCREAMING_SNAKE_CASE =value
_SCREAMING_SNAKE_CASE =model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
def lowerCamelCase( a__ ,a__ ,a__ ,a__="ybelkada/segment-anything"):
_SCREAMING_SNAKE_CASE =hf_hub_download(a__ ,f"checkpoints/{model_name}.pth")
if "sam_vit_b" in model_name:
_SCREAMING_SNAKE_CASE =SamConfig()
elif "sam_vit_l" in model_name:
_SCREAMING_SNAKE_CASE =SamVisionConfig(
hidden_size=1024 ,num_hidden_layers=24 ,num_attention_heads=16 ,global_attn_indexes=[5, 11, 17, 23] ,)
_SCREAMING_SNAKE_CASE =SamConfig(
vision_config=a__ ,)
elif "sam_vit_h" in model_name:
_SCREAMING_SNAKE_CASE =SamVisionConfig(
hidden_size=1280 ,num_hidden_layers=32 ,num_attention_heads=16 ,global_attn_indexes=[7, 15, 23, 31] ,)
_SCREAMING_SNAKE_CASE =SamConfig(
vision_config=a__ ,)
_SCREAMING_SNAKE_CASE =torch.load(a__ ,map_location='''cpu''')
_SCREAMING_SNAKE_CASE =replace_keys(a__)
_SCREAMING_SNAKE_CASE =SamImageProcessor()
_SCREAMING_SNAKE_CASE =SamProcessor(image_processor=a__)
_SCREAMING_SNAKE_CASE =SamModel(a__)
hf_model.load_state_dict(a__)
_SCREAMING_SNAKE_CASE =hf_model.to('''cuda''')
_SCREAMING_SNAKE_CASE ='''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
_SCREAMING_SNAKE_CASE =Image.open(requests.get(a__ ,stream=a__).raw).convert('''RGB''')
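    # a single prompt point with label 1 (foreground)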
_SCREAMING_SNAKE_CASE =[[[400, 650]]]
_SCREAMING_SNAKE_CASE =[[1]]
_SCREAMING_SNAKE_CASE =processor(images=np.array(a__) ,return_tensors='''pt''').to('''cuda''')
with torch.no_grad():
_SCREAMING_SNAKE_CASE =hf_model(**a__)
_SCREAMING_SNAKE_CASE =output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
_SCREAMING_SNAKE_CASE =processor(
images=np.array(a__) ,input_points=a__ ,input_labels=a__ ,return_tensors='''pt''').to('''cuda''')
with torch.no_grad():
_SCREAMING_SNAKE_CASE =hf_model(**a__)
_SCREAMING_SNAKE_CASE =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
_SCREAMING_SNAKE_CASE =((75, 275, 1725, 850),)
_SCREAMING_SNAKE_CASE =processor(images=np.array(a__) ,input_boxes=a__ ,return_tensors='''pt''').to('''cuda''')
with torch.no_grad():
_SCREAMING_SNAKE_CASE =hf_model(**a__)
_SCREAMING_SNAKE_CASE =output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
_SCREAMING_SNAKE_CASE =[[[400, 650], [800, 650]]]
_SCREAMING_SNAKE_CASE =[[1, 1]]
_SCREAMING_SNAKE_CASE =processor(
images=np.array(a__) ,input_points=a__ ,input_labels=a__ ,return_tensors='''pt''').to('''cuda''')
with torch.no_grad():
_SCREAMING_SNAKE_CASE =hf_model(**a__)
_SCREAMING_SNAKE_CASE =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
snake_case_ : str = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
snake_case_ : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id) | 691 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = KandinskyImgaImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self : str , _a : int , _a : int=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((256, 256) )
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE ='''A red cartoon frog, 4k'''
_SCREAMING_SNAKE_CASE =KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_SCREAMING_SNAKE_CASE =KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE =pipeline(
_a , image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a ) | 691 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE ='''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
_SCREAMING_SNAKE_CASE =Image.open(requests.get(a__ ,stream=a__).raw).convert('''RGB''')
return image
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =[]
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding'''))
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding'''))
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight'''))
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias'''))
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight'''))
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias'''))
for i in range(config.vision_config.num_hidden_layers):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight'''))
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias'''))
# fmt: on
return rename_keys
def lowerCamelCase( a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =dct.pop(a__)
_SCREAMING_SNAKE_CASE =val
def lowerCamelCase( a__ ,a__):
for i in range(config.vision_config.num_hidden_layers):
# read in original q and v biases
_SCREAMING_SNAKE_CASE =state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
_SCREAMING_SNAKE_CASE =state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
# next, set bias in the state dict
_SCREAMING_SNAKE_CASE =torch.cat((q_bias, torch.zeros_like(a__ ,requires_grad=a__), v_bias))
_SCREAMING_SNAKE_CASE =qkv_bias
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =364 if '''coco''' in model_name else 224
_SCREAMING_SNAKE_CASE =BlipaVisionConfig(image_size=a__).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_SCREAMING_SNAKE_CASE =OPTConfig.from_pretrained('''facebook/opt-2.7b''' ,eos_token_id=a__).to_dict()
elif "opt-6.7b" in model_name:
_SCREAMING_SNAKE_CASE =OPTConfig.from_pretrained('''facebook/opt-6.7b''' ,eos_token_id=a__).to_dict()
elif "t5-xl" in model_name:
_SCREAMING_SNAKE_CASE =TaConfig.from_pretrained('''google/flan-t5-xl''' ,dense_act_fn='''gelu''' ,bos_token_id=1).to_dict()
elif "t5-xxl" in model_name:
_SCREAMING_SNAKE_CASE =TaConfig.from_pretrained('''google/flan-t5-xxl''' ,dense_act_fn='''gelu''' ,bos_token_id=1).to_dict()
_SCREAMING_SNAKE_CASE =BlipaConfig(vision_config=a__ ,text_config=a__)
return config, image_size
@torch.no_grad()
def lowerCamelCase( a__ ,a__=None ,a__=False):
_SCREAMING_SNAKE_CASE =(
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''')
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''')
)
_SCREAMING_SNAKE_CASE =tokenizer('''\n''' ,add_special_tokens=a__).input_ids[0]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =get_blipa_config(a__ ,eos_token_id=a__)
_SCREAMING_SNAKE_CASE =BlipaForConditionalGeneration(a__).eval()
_SCREAMING_SNAKE_CASE ={
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =model_name_to_original[model_name]
# load original model
print('''Loading original model...''')
_SCREAMING_SNAKE_CASE ='''cuda''' if torch.cuda.is_available() else '''cpu'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =load_model_and_preprocess(
name=a__ ,model_type=a__ ,is_eval=a__ ,device=a__)
original_model.eval()
print('''Done!''')
# update state dict keys
_SCREAMING_SNAKE_CASE =original_model.state_dict()
_SCREAMING_SNAKE_CASE =create_rename_keys(a__)
for src, dest in rename_keys:
rename_key(a__ ,a__ ,a__)
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_SCREAMING_SNAKE_CASE =state_dict.pop(a__)
if key.startswith('''Qformer.bert'''):
_SCREAMING_SNAKE_CASE =key.replace('''Qformer.bert''' ,'''qformer''')
if "attention.self" in key:
_SCREAMING_SNAKE_CASE =key.replace('''self''' ,'''attention''')
if "opt_proj" in key:
_SCREAMING_SNAKE_CASE =key.replace('''opt_proj''' ,'''language_projection''')
if "t5_proj" in key:
_SCREAMING_SNAKE_CASE =key.replace('''t5_proj''' ,'''language_projection''')
if key.startswith('''opt'''):
_SCREAMING_SNAKE_CASE =key.replace('''opt''' ,'''language''')
if key.startswith('''t5'''):
_SCREAMING_SNAKE_CASE =key.replace('''t5''' ,'''language''')
_SCREAMING_SNAKE_CASE =val
# read in qv biases
read_in_q_v_bias(a__ ,a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =hf_model.load_state_dict(a__ ,strict=a__)
assert len(a__) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_SCREAMING_SNAKE_CASE =load_demo_image()
_SCREAMING_SNAKE_CASE =vis_processors['''eval'''](a__).unsqueeze(0).to(a__)
_SCREAMING_SNAKE_CASE =tokenizer(['''\n'''] ,return_tensors='''pt''').input_ids.to(a__)
# create processor
_SCREAMING_SNAKE_CASE =BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} ,image_mean=a__ ,image_std=a__)
_SCREAMING_SNAKE_CASE =BlipaProcessor(image_processor=a__ ,tokenizer=a__)
_SCREAMING_SNAKE_CASE =processor(images=a__ ,return_tensors='''pt''').pixel_values.to(a__)
# make sure processor creates exact same pixel values
assert torch.allclose(a__ ,a__)
original_model.to(a__)
hf_model.to(a__)
with torch.no_grad():
if "opt" in model_name:
_SCREAMING_SNAKE_CASE =original_model({'''image''': original_pixel_values, '''text_input''': ['''''']}).logits
_SCREAMING_SNAKE_CASE =hf_model(a__ ,a__).logits
else:
_SCREAMING_SNAKE_CASE =original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']}).logits
_SCREAMING_SNAKE_CASE =input_ids.masked_fill(input_ids == tokenizer.pad_token_id ,-100)
_SCREAMING_SNAKE_CASE =hf_model(a__ ,a__ ,labels=a__).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' ,original_logits[0, :3, :3])
print('''First values of HF logits:''' ,logits[0, :3, :3])
# assert values
if model_name == "blip2-flan-t5-xl":
_SCREAMING_SNAKE_CASE =torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] ,device=a__)
assert torch.allclose(logits[0, :3, :3] ,a__ ,atol=1e-4)
elif model_name == "blip2-flan-t5-xl-coco":
_SCREAMING_SNAKE_CASE =torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] ,device=a__)
        assert torch.allclose(logits[0, :3, :3] ,a__ ,atol=1e-4)
else:
# cast to same type
_SCREAMING_SNAKE_CASE =logits.dtype
assert torch.allclose(original_logits.to(a__) ,a__ ,atol=1e-2)
print('''Looks ok!''')
print('''Generating a caption...''')
_SCREAMING_SNAKE_CASE =''''''
_SCREAMING_SNAKE_CASE =tokenizer(a__ ,return_tensors='''pt''').input_ids.to(a__)
_SCREAMING_SNAKE_CASE =original_model.generate({'''image''': original_pixel_values})
_SCREAMING_SNAKE_CASE =hf_model.generate(
a__ ,a__ ,do_sample=a__ ,num_beams=5 ,max_length=30 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.0 ,length_penalty=1.0 ,temperature=1 ,)
print('''Original generation:''' ,a__)
_SCREAMING_SNAKE_CASE =input_ids.shape[1]
_SCREAMING_SNAKE_CASE =processor.batch_decode(outputs[:, prompt_length:] ,skip_special_tokens=a__)
_SCREAMING_SNAKE_CASE =[text.strip() for text in output_text]
print('''HF generation:''' ,a__)
if pytorch_dump_folder_path is not None:
processor.save_pretrained(a__)
hf_model.save_pretrained(a__)
if push_to_hub:
processor.push_to_hub(f"nielsr/{model_name}")
hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
snake_case_ : List[Any] = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
snake_case_ : Optional[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 691 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
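# Tests for ChineseCLIPImageProcessor: attribute checks, config round-trips, and PIL / NumPy / PyTorch input handling.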
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCamelCase ( self : Tuple , _a : Optional[Any]=False , _a : str=False , _a : Dict=False ) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , do_center_crop=_a )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_a )
_SCREAMING_SNAKE_CASE =3
@property
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 691 | 1 |
from collections.abc import Generator
from math import sin
def lowerCamelCase( a__):
if len(a__) != 32:
raise ValueError('''Input must be of length 32''')
_SCREAMING_SNAKE_CASE =b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowerCamelCase( a__):
if i < 0:
raise ValueError('''Input must be non-negative''')
_SCREAMING_SNAKE_CASE =format(a__ ,'''08x''')[-8:]
_SCREAMING_SNAKE_CASE =b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''')
return little_endian_hex
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =b''''''
for char in message:
bit_string += format(a__ ,'''08b''').encode('''utf-8''')
_SCREAMING_SNAKE_CASE =format(len(a__) ,'''064b''').encode('''utf-8''')
    # Pad bit_string to a multiple of 512 bits: append a single '1', zero-fill until length % 512 == 448, then append the 64-bit message length
bit_string += b"1"
while len(a__) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
return bit_string
def lowerCamelCase( a__):
if len(a__) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''')
for pos in range(0 ,len(a__) ,512):
_SCREAMING_SNAKE_CASE =bit_string[pos : pos + 512]
_SCREAMING_SNAKE_CASE =[]
for i in range(0 ,512 ,32):
block_words.append(int(to_little_endian(block[i : i + 32]) ,2))
yield block_words
def lowerCamelCase( a__):
if i < 0:
raise ValueError('''Input must be non-negative''')
_SCREAMING_SNAKE_CASE =format(a__ ,'''032b''')
_SCREAMING_SNAKE_CASE =''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(a__ ,2)
def lowerCamelCase( a__ ,a__):
return (a + b) % 2**32
def lowerCamelCase( a__ ,a__):
if i < 0:
raise ValueError('''Input must be non-negative''')
if shift < 0:
raise ValueError('''Shift must be non-negative''')
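    # the two shifted halves occupy disjoint bit positions, so XOR acts as OR; the modulo keeps the result 32-bit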
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =preprocess(a__)
_SCREAMING_SNAKE_CASE =[int(2**32 * abs(sin(i + 1))) for i in range(64)]
# Starting states
_SCREAMING_SNAKE_CASE =0x6745_2301
_SCREAMING_SNAKE_CASE =0xefcd_ab89
_SCREAMING_SNAKE_CASE =0x98ba_dcfe
_SCREAMING_SNAKE_CASE =0x1032_5476
    _SCREAMING_SNAKE_CASE =[
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process the bit string in 512-bit chunks, each split into 16 32-bit words
for block_words in get_block_words(a__):
_SCREAMING_SNAKE_CASE =aa
_SCREAMING_SNAKE_CASE =ba
_SCREAMING_SNAKE_CASE =ca
_SCREAMING_SNAKE_CASE =da
# Hash current chunk
for i in range(64):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_SCREAMING_SNAKE_CASE =d ^ (b & (c ^ d))
_SCREAMING_SNAKE_CASE =i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_SCREAMING_SNAKE_CASE =c ^ (d & (b ^ c))
_SCREAMING_SNAKE_CASE =(5 * i + 1) % 16
elif i <= 47:
_SCREAMING_SNAKE_CASE =b ^ c ^ d
_SCREAMING_SNAKE_CASE =(3 * i + 5) % 16
else:
_SCREAMING_SNAKE_CASE =c ^ (b | not_aa(a__))
_SCREAMING_SNAKE_CASE =(7 * i) % 16
_SCREAMING_SNAKE_CASE =(f + a + added_consts[i] + block_words[g]) % 2**32
_SCREAMING_SNAKE_CASE =d
_SCREAMING_SNAKE_CASE =c
_SCREAMING_SNAKE_CASE =b
_SCREAMING_SNAKE_CASE =sum_aa(a__ ,left_rotate_aa(a__ ,shift_amounts[i]))
# Add hashed chunk to running total
_SCREAMING_SNAKE_CASE =sum_aa(a__ ,a__)
_SCREAMING_SNAKE_CASE =sum_aa(a__ ,a__)
_SCREAMING_SNAKE_CASE =sum_aa(a__ ,a__)
_SCREAMING_SNAKE_CASE =sum_aa(a__ ,a__)
_SCREAMING_SNAKE_CASE =reformat_hex(a__) + reformat_hex(a__) + reformat_hex(a__) + reformat_hex(a__)
return digest
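# Known-answer check: the MD5 digest of b"" is "d41d8cd98f00b204e9800998ecf8427e",
# matching hashlib.md5(b"").hexdigest().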
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 |
def lowerCamelCase( a__ ,a__):
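    # AND gate: output is 1 only when both inputs are non-zero (the pair contains no 0)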
return int((input_a, input_a).count(0) == 0)
def lowerCamelCase( ):
assert and_gate(0 ,0) == 0
assert and_gate(0 ,1) == 0
assert and_gate(1 ,0) == 0
assert and_gate(1 ,1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 691 | 1 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
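    # drop keys that have no counterpart in the Transformers model; pop() takes a default here, so absent keys are skipped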
for k in ignore_keys:
state_dict.pop(a__ ,a__)
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =list(s_dict.keys())
for key in keys:
if "transformer_layers" in key:
_SCREAMING_SNAKE_CASE =s_dict.pop(a__)
elif "subsample" in key:
_SCREAMING_SNAKE_CASE =s_dict.pop(a__)
def lowerCamelCase( a__):
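    # turn the embedding matrix into an equivalent linear output projection that shares its weights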
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =emb.weight.shape
_SCREAMING_SNAKE_CASE =nn.Linear(a__ ,a__ ,bias=a__)
_SCREAMING_SNAKE_CASE =emb.weight.data
return lin_layer
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =torch.load(a__ ,map_location='''cpu''')
_SCREAMING_SNAKE_CASE =mam_aaa['''args''']
_SCREAMING_SNAKE_CASE =mam_aaa['''model''']
_SCREAMING_SNAKE_CASE =state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(a__)
rename_keys(a__)
_SCREAMING_SNAKE_CASE =state_dict['''decoder.embed_tokens.weight'''].shape[0]
_SCREAMING_SNAKE_CASE =args.share_decoder_input_output_embed
_SCREAMING_SNAKE_CASE =[int(a__) for i in args.conv_kernel_sizes.split(''',''')]
_SCREAMING_SNAKE_CASE =SpeechaTextConfig(
vocab_size=a__ ,max_source_positions=args.max_source_positions ,max_target_positions=args.max_target_positions ,encoder_layers=args.encoder_layers ,decoder_layers=args.decoder_layers ,encoder_attention_heads=args.encoder_attention_heads ,decoder_attention_heads=args.decoder_attention_heads ,encoder_ffn_dim=args.encoder_ffn_embed_dim ,decoder_ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.encoder_embed_dim ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function='''relu''' ,num_conv_layers=len(a__) ,conv_channels=args.conv_channels ,conv_kernel_sizes=a__ ,input_feat_per_channel=args.input_feat_per_channel ,input_channels=args.input_channels ,tie_word_embeddings=a__ ,num_beams=5 ,max_length=200 ,use_cache=a__ ,decoder_start_token_id=2 ,early_stopping=a__ ,)
_SCREAMING_SNAKE_CASE =SpeechaTextForConditionalGeneration(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =model.model.load_state_dict(a__ ,strict=a__)
if len(a__) > 0 and not set(a__) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f" but all the following weights are missing {missing}")
if tie_embeds:
_SCREAMING_SNAKE_CASE =make_linear_from_emb(model.model.decoder.embed_tokens)
else:
_SCREAMING_SNAKE_CASE =lm_head_weights
model.save_pretrained(a__)
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
snake_case_ : Any = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path) | 691 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case_ : Optional[int] = '''sshleifer/mar_enro_6_3_student'''
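# End-to-end GPU tests that run the example fine-tuning and distillation bash scripts on a small en-ro translation setup.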
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_a , )
_SCREAMING_SNAKE_CASE =f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # fails if the model hangs on generate; maybe a bad config was saved
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipeline_utils import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
) | 691 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
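# Minimal KwargsHandler used to check that to_kwargs() reports only the fields that differ from their defaults.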
@dataclass
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = 0
UpperCAmelCase = False
UpperCAmelCase = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_SCREAMING_SNAKE_CASE =Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_SCREAMING_SNAKE_CASE =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
snake_case_ : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
snake_case_ : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
snake_case_ : Dict = torch.nn.Linear(1_00, 2_00)
snake_case_ : List[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
snake_case_ : Dict = ''''''
snake_case_ : str = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 691 | 1 |
from ...configuration_utils import PretrainedConfig
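# Configuration for BertGeneration models: BERT checkpoints adapted for sequence generation.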
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "bert-generation"
def __init__( self : Union[str, Any] , _a : int=5_0358 , _a : List[str]=1024 , _a : Dict=24 , _a : int=16 , _a : Union[str, Any]=4096 , _a : List[str]="gelu" , _a : Union[str, Any]=0.1 , _a : int=0.1 , _a : List[str]=512 , _a : Optional[Any]=0.02 , _a : Optional[Any]=1E-12 , _a : int=0 , _a : List[Any]=2 , _a : List[str]=1 , _a : str="absolute" , _a : Optional[Any]=True , **_a : Optional[int] , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =position_embedding_type
_SCREAMING_SNAKE_CASE =use_cache | 691 |
class A__ :
def __init__( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE ={}
def __UpperCamelCase ( self : Any , _a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if vertex not in self.adjacency:
_SCREAMING_SNAKE_CASE ={}
self.num_vertices += 1
def __UpperCamelCase ( self : Optional[int] , _a : Tuple , _a : Tuple , _a : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_edges()
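        # the adjacency map stores every undirected edge twice (once per direction), so drop the reversed duplicates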
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
_SCREAMING_SNAKE_CASE =list(edges[i] )
        edges.sort(key=lambda e: e[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_SCREAMING_SNAKE_CASE =edges[i][2] + 1
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __str__( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_SCREAMING_SNAKE_CASE =self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''' )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( _a : List[str]=None , _a : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Graph()
if vertices is None:
_SCREAMING_SNAKE_CASE =[]
if edges is None:
_SCREAMING_SNAKE_CASE =[]
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class A__ :
def __init__( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE ={}
def __len__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return len(self.parent )
def __UpperCamelCase ( self : Dict , _a : Optional[Any] ) -> int:
"""simple docstring"""
if item in self.parent:
return self.find(_a )
_SCREAMING_SNAKE_CASE =item
_SCREAMING_SNAKE_CASE =0
return item
def __UpperCamelCase ( self : str , _a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
_SCREAMING_SNAKE_CASE =self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , _a : Optional[int] , _a : List[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.find(_a )
_SCREAMING_SNAKE_CASE =self.find(_a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] < self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_SCREAMING_SNAKE_CASE =roota
return roota
return None
@staticmethod
def __UpperCamelCase ( _a : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =graph.num_vertices
_SCREAMING_SNAKE_CASE =Graph.UnionFind()
_SCREAMING_SNAKE_CASE =[]
while num_components > 1:
_SCREAMING_SNAKE_CASE ={}
for vertex in graph.get_vertices():
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =graph.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =union_find.find(_a )
_SCREAMING_SNAKE_CASE =union_find.find(_a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
_SCREAMING_SNAKE_CASE =num_components - 1
_SCREAMING_SNAKE_CASE =Graph.build(edges=_a )
return mst | 691 | 1 |
from math import factorial
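# Sums the decimal digits of n! (Project Euler problem 20 uses n = 100).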
def lowerCamelCase( a__ = 100):
return sum(map(a__ ,str(factorial(a__))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip()))) | 691 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
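# Fine-tunes a multiple-choice model (e.g. on SWAG-style tasks) with the Hugging Face Trainer.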
def lowerCamelCase( a__ ,a__):
return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(a__) -> Dict:
_SCREAMING_SNAKE_CASE =np.argmax(p.predictions ,axis=1)
return {"acc": simple_accuracy(a__ ,p.label_ids)}
# Data collator
_SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 691 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
snake_case_ : Union[str, Any] = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
snake_case_ : int = '''>>zh<<'''
snake_case_ : Union[str, Any] = '''Helsinki-NLP/'''
if is_torch_available():
snake_case_ : Optional[int] = '''pt'''
elif is_tf_available():
snake_case_ : Optional[int] = '''tf'''
else:
snake_case_ : str = '''jax'''
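# Tests for MarianTokenizer: vocab handling, padding and truncation, and the separate source/target SentencePiece models.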
@require_sentencepiece
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = MarianTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =Path(self.tmpdirname )
save_json(_a , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(_a , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_a , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(_a , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
_SCREAMING_SNAKE_CASE =MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : int , **_a : Union[str, Any] ) -> MarianTokenizer:
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , _a : int ) -> List[str]:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''</s>'''
_SCREAMING_SNAKE_CASE =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_a ) , 9 )
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
_SCREAMING_SNAKE_CASE =en_de_tokenizer(['''I am a small frog'''] , return_tensors=_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =[38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(_a , batch.input_ids[0] )
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =[x.name for x in Path(_a ).glob('''*''' )]
self.assertIn('''source.spm''' , _a )
MarianTokenizer.from_pretrained(_a )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =tok(
['''I am a small frog''' * 1000, '''I am a small frog'''] , padding=_a , truncation=_a , return_tensors=_a )
self.assertIsInstance(_a , _a )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=_a , return_tensors=_a )
self.assertIsInstance(_a , _a )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={'''input_ids''': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
_SCREAMING_SNAKE_CASE ='''Tämä on testi'''
_SCREAMING_SNAKE_CASE ='''This is a test'''
_SCREAMING_SNAKE_CASE =[76, 7, 2047, 2]
_SCREAMING_SNAKE_CASE =[69, 12, 11, 940, 2]
_SCREAMING_SNAKE_CASE =tokenizer(_a ).input_ids
self.assertListEqual(_a , _a )
_SCREAMING_SNAKE_CASE =tokenizer(text_target=_a ).input_ids
self.assertListEqual(_a , _a )
_SCREAMING_SNAKE_CASE =tokenizer.decode(_a , skip_special_tokens=_a )
self.assertEqual(_a , _a ) | 691 |
def lowerCamelCase( a__ ,a__ ,a__):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(a__ ,n - 1 ,a__) * a) % mod
else:
        _SCREAMING_SNAKE_CASE =binary_exponentiation(a__ ,n // 2 ,a__)
return (b * b) % mod
# a prime number
snake_case_ : Union[str, Any] = 7_01
snake_case_ : int = 10_00_00_00_00
snake_case_ : str = 10
# using binary exponentiation function, O(log(p)):
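# By Fermat's little theorem, b**(p-2) mod p is the modular inverse of b when p is prime and b is not divisible by p.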
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 691 | 1 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCamelCase ( self : Optional[int] , _a : bool ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
            len(_a ) , 6 )  # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary. | 691 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
                if isinstance(_a , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
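        # (Added note) The stage count is len(depths); hidden_states also
        # includes the embedding output, hence the `expected_num_stages + 1`
        # check above.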
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
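# (Added note) The helper above returns the shared COCO two-cats PIL fixture
# that the slow integration test below feeds through `BitImageProcessor`.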
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self ) | 691 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node):
    # Validation
    def is_valid_tree(node) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.''')

    def is_binary_search_tree_recursive_check(
        node, left_bound, right_bound) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float('''inf'''), float('''inf'''))
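# Usage sketch (added; names follow the fixed definitions above):
#
#   root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#   is_binary_search_tree(root)           # -> True
#   is_binary_search_tree(TreeNode("x"))  # raises ValueError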
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index
        encapsulated by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
    index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
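# Illustrative composition sketch (assumes the public `RagConfig` /
# `from_question_encoder_generator_configs` API described above; verify
# against your transformers version before relying on it):
#
#   from transformers import BartConfig, DPRConfig, RagConfig
#   config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
#   )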
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "rag"
UpperCAmelCase = True
def __init__( self : Tuple , _a : List[Any]=None , _a : Tuple=True , _a : Optional[Any]=None , _a : int=None , _a : List[str]=None , _a : int=None , _a : Optional[int]=None , _a : str=" / " , _a : Any=" // " , _a : Optional[Any]=5 , _a : int=300 , _a : Optional[Any]=768 , _a : Any=8 , _a : List[str]="wiki_dpr" , _a : Dict="train" , _a : Union[str, Any]="compressed" , _a : str=None , _a : Union[str, Any]=None , _a : int=False , _a : Any=False , _a : Any=0.0 , _a : Any=True , _a : List[str]=False , _a : Optional[int]=False , _a : int=False , _a : Union[str, Any]=True , _a : Optional[int]=None , **_a : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''question_encoder''' )
_SCREAMING_SNAKE_CASE =question_encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''generator''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =reduce_loss
_SCREAMING_SNAKE_CASE =label_smoothing
_SCREAMING_SNAKE_CASE =exclude_bos_score
_SCREAMING_SNAKE_CASE =do_marginalize
_SCREAMING_SNAKE_CASE =title_sep
_SCREAMING_SNAKE_CASE =doc_sep
_SCREAMING_SNAKE_CASE =n_docs
_SCREAMING_SNAKE_CASE =max_combined_length
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =dataset_split
_SCREAMING_SNAKE_CASE =index_name
_SCREAMING_SNAKE_CASE =retrieval_vector_size
_SCREAMING_SNAKE_CASE =retrieval_batch_size
_SCREAMING_SNAKE_CASE =passages_path
_SCREAMING_SNAKE_CASE =index_path
_SCREAMING_SNAKE_CASE =use_dummy_dataset
_SCREAMING_SNAKE_CASE =output_retrieved
_SCREAMING_SNAKE_CASE =do_deduplication
_SCREAMING_SNAKE_CASE =use_cache
if self.forced_eos_token_id is None:
_SCREAMING_SNAKE_CASE =getattr(self.generator , '''forced_eos_token_id''' , _a )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : Dict ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.question_encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.generator.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output | 691 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
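    # (Added note) prepare_image_inputs builds a single random 30x400 RGB PIL
    # image, enough to exercise resize / center-crop without fixture files.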
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 691 |
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait() | 691 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']

OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''


def convert_dialogpt_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
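# Example programmatic use (hypothetical paths; the CLI block below drives the
# same function once per model size):
#
#   convert_dialogpt_checkpoint('''./medium_ft.pkl''', '''./DialoGPT-medium''')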
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
snake_case_ : Optional[Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
snake_case_ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
snake_case_ : int = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
) | 691 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
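    # (Added note) Blenderbot appends only an EOS token and intentionally
    # ignores a second sequence, e.g. build_inputs_with_special_tokens([5, 7])
    # -> [5, 7, eos_token_id].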
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 691 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
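# Illustrative usage once the torch imports above succeed (sketch; standard
# diffusers API, but verify against your installed version):
#
#   from diffusers.schedulers import DDIMScheduler
#   scheduler = DDIMScheduler(num_train_timesteps=1000)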
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler | 691 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of one random RGB image as a PIL Image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
from ..utils import DummyObject, requires_backends
# Dummy object for the torch+scipy backend (the class name is inferred from the
# backend pair; in diffusers this module exposes LMSDiscreteScheduler).
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
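# Illustrative sketch of what `requires_backends` does (assumption: the real
# helper formats a friendlier message, but the behaviour is the same — fail
# loudly at *use* time instead of at import time):
def requires_backends_sketch(obj, backends):
    import importlib.util

    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the backends {missing}, which are not installed.")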
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
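# Hedged usage sketch: generate a single card into a throwaway directory so the
# real model_cards tree is untouched (the temp path is an assumption for the demo):
if __name__ == "__main__":
    import tempfile

    demo_dir = Path(tempfile.mkdtemp()) / "allenai" / "wmt16-en-de-12-1"
    write_model_card(demo_dir, src_lang="en", tgt_lang="de", model_name="wmt16-en-de-12-1")
    print((demo_dir / "README.md").exists())  # expected: True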
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
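# Illustrative usage of the helpers above (assumes an ANSI-capable terminal;
# color code 32 is green):
if __name__ == "__main__":
    writeColor("hello", 32, end="\n")
    linebreak()
    forceWrite("\n")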
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
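# A minimal, self-contained sketch (illustrative only — not the actual
# `_LazyModule` implementation) of the lazy-import idea used above: attribute
# access triggers the real submodule import on demand.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the submodule only when one of its names is first requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)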
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING`
    objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
    check_submodules()
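# Illustrative end-to-end run of `parse_init` on a tiny synthetic init file
# (kept as a function so it does not interfere with the checks above):
def _demo_parse_init():
    import tempfile

    demo = (
        "_import_structure = {\n"
        '    "foo": ["Bar"],\n'
        "}\n"
        "if TYPE_CHECKING:\n"
        "    from .foo import Bar\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(demo)
    print(parse_init(tmp.name))  # expected: ({'none': ['Bar']}, {'none': ['Bar']})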
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    # Decorator that returns the wall-clock duration of `func` instead of its result.
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
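# Hedged usage sketch: materialize a tiny throwaway dataset with the helpers
# above (the feature spec and file name are assumptions for the demo):
if __name__ == "__main__":
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
        demo_dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dummy.arrow"), demo_features, num_examples=10
        )
        print(len(demo_dataset))  # expected: 10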
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term (note: .copy() is a
    # shallow copy, so the row lists are shared and modified in place)
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
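    # Worked check (illustrative): for the system 2x + y = 4 and x + 2y = 5
    # the expected solution is x = 1, y = 2.
    print(solve_simultaneous([[2, 1, 4], [1, 2, 5]]))  # -> [1.0, 2.0]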
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
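# Hedged usage sketch of patience-based early exit at inference time (the tiny
# config, input ids, and patience value are assumptions for the demo):
if __name__ == "__main__":
    from transformers import BertConfig

    demo_config = BertConfig(num_hidden_layers=4, num_labels=2)
    demo_model = BertForSequenceClassificationWithPabee(demo_config)
    demo_model.bert.set_patience(2)  # exit once 2 consecutive layers agree
    demo_model.eval()
    with torch.no_grad():
        demo_outputs = demo_model(input_ids=torch.tensor([[101, 7592, 102]]))
    demo_model.bert.log_stats()  # reports how many layers were actually used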
def heaps(arr: list) -> list:
    """Heap's algorithm: return all permutations of `arr` as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
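# Illustrative check of the algorithm above: a 3-element list yields all
# 3! = 6 orderings, each exactly once (result order follows Heap's swaps).
def _demo_heaps():
    perms = heaps([1, 2, 3])
    assert len(perms) == 6
    assert len(set(perms)) == 6
    print(perms)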
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing inputs."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
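# Hedged usage sketch (requires the `math_equivalence` dependency:
# pip install git+https://github.com/hendrycks/math.git). In practice the
# metric is loaded by name through the datasets library:
if __name__ == "__main__":
    metric = datasets.load_metric("competition_math")
    results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    print(results)  # expected: {'accuracy': 1.0}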
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =[
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_a ),
*get_values(_a ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
_SCREAMING_SNAKE_CASE =problem_type['''title''']
_SCREAMING_SNAKE_CASE =problem_type['''num_labels''']
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.train()
_SCREAMING_SNAKE_CASE =self._prepare_for_class(_a , _a , return_labels=_a )
if problem_type["num_labels"] > 1:
_SCREAMING_SNAKE_CASE =inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
_SCREAMING_SNAKE_CASE =inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
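                    # A minimal illustration (not part of this test): something like
                    #   torch.nn.MSELoss()(torch.rand(3), torch.rand(3, 1))
                    # broadcasts the target against the input and emits exactly this UserWarning.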
with warnings.catch_warnings(record=_a ) as warning_list:
_SCREAMING_SNAKE_CASE =model(**_a ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =DeiTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self : Any ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DeiTModel.from_pretrained(
            '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.float16 , device_map='''auto''' )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' )
_SCREAMING_SNAKE_CASE =inputs.pixel_values.to(_a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
            _SCREAMING_SNAKE_CASE =model(_a )
import requests
from bs4 import BeautifulSoup
def lowerCamelCase( a__ = "https://www.worldometers.info/coronavirus"):
_SCREAMING_SNAKE_CASE =BeautifulSoup(requests.get(a__).text ,'''html.parser''')
_SCREAMING_SNAKE_CASE =soup.findAll('''h1''')
_SCREAMING_SNAKE_CASE =soup.findAll('''div''' ,{'''class''': '''maincounter-number'''})
keys += soup.findAll('''span''' ,{'''class''': '''panel-title'''})
values += soup.findAll('''div''' ,{'''class''': '''number-table-main'''})
return {key.text.strip(): value.text.strip() for key, value in zip(a__ ,a__)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in lowerCamelCase().items():
        print(f"""{key}\n{value}\n""")
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
snake_case_ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase( a__):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(a__):
return ext
raise Exception(
f"Unable to determine file format from file extension {path}. "
f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}")
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =pipeline(
task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
_SCREAMING_SNAKE_CASE =try_infer_format_from_ext(args.input) if args.format == '''infer''' else args.format
_SCREAMING_SNAKE_CASE =PipelineDataFormat.from_str(
format=a__ ,output_path=args.output ,input_path=args.input ,column=args.column if args.column else nlp.default_input_names ,overwrite=args.overwrite ,)
return RunCommand(a__ ,a__)
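# A hypothetical invocation of the command wired up above, assuming the parser is
# mounted on the usual `transformers-cli` entry point (the flags match the
# arguments registered on the run parser below):
#
#   transformers-cli run --task sentiment-analysis --input reviews.csv \
#       --column text --output predictions.json --overwrite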
class A__ ( UpperCamelCase__ ):
def __init__( self : str , _a : Pipeline , _a : PipelineDataFormat ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =nlp
_SCREAMING_SNAKE_CASE =reader
@staticmethod
def __UpperCamelCase ( _a : ArgumentParser ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
run_parser.add_argument('''--input''' , type=_a , help='''Path to the file to use for inference''' )
run_parser.add_argument('''--output''' , type=_a , help='''Path to the file that will be used post to write results.''' )
run_parser.add_argument('''--model''' , type=_a , help='''Name or path to the model to instantiate.''' )
run_parser.add_argument('''--config''' , type=_a , help='''Name or path to the model\'s config to instantiate.''' )
run_parser.add_argument(
'''--tokenizer''' , type=_a , help='''Name of the tokenizer to use. (default: same as the model name)''' )
run_parser.add_argument(
'''--column''' , type=_a , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=_a , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=_a , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
run_parser.set_defaults(func=_a )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._nlp, []
for entry in self._reader:
_SCREAMING_SNAKE_CASE =nlp(**_a ) if self._reader.is_multi_columns else nlp(_a )
if isinstance(_a , _a ):
outputs.append(_a )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_SCREAMING_SNAKE_CASE =self._reader.save_binary(_a )
logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}" )
else:
self._reader.save(_a ) | 691 |
def lowerCamelCase( a__ ,a__):
return number | (1 << position)
def lowerCamelCase( a__ ,a__):
return number & ~(1 << position)
def lowerCamelCase( a__ ,a__):
return number ^ (1 << position)
def lowerCamelCase( a__ ,a__):
return ((number >> position) & 1) == 1
def lowerCamelCase( a__ ,a__):
return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
    doctest.testmod()
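# Worked examples for the five helpers above on n = 0b1010 (decimal 10), listed in
# definition order (they are all named `lowerCamelCase` here, so the labels below
# are descriptive only):
#
#   set bit 0:    10 | (1 << 0)             -> 0b1011 (11)
#   clear bit 1:  10 & ~(1 << 1)            -> 0b1000 (8)
#   flip bit 1:   10 ^ (1 << 1)             -> 0b1000 (8)
#   bit 3 set?    ((10 >> 3) & 1) == 1      -> True
#   get bit 2:    int((10 & (1 << 2)) != 0) -> 0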
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class A__ ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = "dinat"
UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , _a : Tuple=4 , _a : Dict=3 , _a : Union[str, Any]=64 , _a : Optional[int]=[3, 4, 6, 5] , _a : List[str]=[2, 4, 8, 16] , _a : Any=7 , _a : str=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , _a : List[Any]=3.0 , _a : int=True , _a : Tuple=0.0 , _a : Tuple=0.0 , _a : Union[str, Any]=0.1 , _a : Any="gelu" , _a : List[str]=0.02 , _a : int=1E-5 , _a : Optional[int]=0.0 , _a : List[str]=None , _a : Dict=None , **_a : Union[str, Any] , ) -> Any:
"""simple docstring"""
super().__init__(**_a )
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embed_dim
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =num_heads
_SCREAMING_SNAKE_CASE =kernel_size
_SCREAMING_SNAKE_CASE =dilations
_SCREAMING_SNAKE_CASE =mlp_ratio
_SCREAMING_SNAKE_CASE =qkv_bias
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =drop_path_rate
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_SCREAMING_SNAKE_CASE =int(embed_dim * 2 ** (len(_a ) - 1) )
_SCREAMING_SNAKE_CASE =layer_scale_init_value
_SCREAMING_SNAKE_CASE =['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_a ) + 1 )]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names ) | 691 |
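# A quick sanity check on the defaults above (not part of the library): depths has
# four stages, so the derived hidden_size is embed_dim * 2 ** (len(depths) - 1)
# = 64 * 8 = 512, and stage_names becomes
# ["stem", "stage1", "stage2", "stage3", "stage4"].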
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
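    # Note: the two dummy embeddings (all ones vs. all twos) make inner-product
    # retrieval deterministic -- a query of all ones always scores highest against
    # doc "1", which the assertions in the tests below rely on.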
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCamelCase ( self : Optional[int] , _a : bool ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
            len(_a ) , 6 )  # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a )  # check for doc token related keys in dictionary.
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A__ ( unittest.TestCase ):
UpperCAmelCase = StableDiffusionLDMaDPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
_SCREAMING_SNAKE_CASE =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_SCREAMING_SNAKE_CASE =CLIPTextModel(_a )
_SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_SCREAMING_SNAKE_CASE ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __UpperCamelCase ( self : Dict , _a : List[str] , _a : Dict=0 ) -> Optional[int]:
"""simple docstring"""
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =StableDiffusionLDMaDPipeline(**_a )
_SCREAMING_SNAKE_CASE =ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =ldmad_pipe(**_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output.rgb, output.depth
_SCREAMING_SNAKE_CASE =rgb[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_SCREAMING_SNAKE_CASE =np.array(
[0.37_33_81_76, 0.7_02_47, 0.74_20_31_93, 0.51_64_36_04, 0.58_25_67_93, 0.60_93_21_36, 0.4_18_10_95, 0.48_35_58_77, 0.46_53_52_62] )
_SCREAMING_SNAKE_CASE =np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =StableDiffusionLDMaDPipeline(**_a )
_SCREAMING_SNAKE_CASE =ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =3 * [inputs['''prompt''']]
# forward
_SCREAMING_SNAKE_CASE =ldmad_pipe(**_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output.rgb, output.depth
_SCREAMING_SNAKE_CASE =rgb_slice_a[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =depth_slice_a[0, -3:, -1]
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =3 * [inputs.pop('''prompt''' )]
_SCREAMING_SNAKE_CASE =ldmad_pipe.tokenizer(
_a , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE =text_inputs['''input_ids'''].to(_a )
_SCREAMING_SNAKE_CASE =ldmad_pipe.text_encoder(_a )[0]
_SCREAMING_SNAKE_CASE =prompt_embeds
# forward
_SCREAMING_SNAKE_CASE =ldmad_pipe(**_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output.rgb, output.depth
_SCREAMING_SNAKE_CASE =rgb_slice_a[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_a )
_SCREAMING_SNAKE_CASE =StableDiffusionLDMaDPipeline(**_a )
_SCREAMING_SNAKE_CASE =ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE ='''french fries'''
_SCREAMING_SNAKE_CASE =ldmad_pipe(**_a , negative_prompt=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output.rgb, output.depth
_SCREAMING_SNAKE_CASE =rgb[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_SCREAMING_SNAKE_CASE =np.array(
[0.3_70_44, 0.71_81_15_03, 0.7_22_32_51, 0.48_60_36_75, 0.5_63_83_91, 0.6_36_49_48, 0.42_83_37_04, 0.4_90_13_15, 0.47_92_62_17] )
_SCREAMING_SNAKE_CASE =np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def __UpperCamelCase ( self : Optional[Any] , _a : int , _a : Tuple="cpu" , _a : Dict=torch.float32 , _a : Optional[Any]=0 ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE =np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
_SCREAMING_SNAKE_CASE =torch.from_numpy(_a ).to(device=_a , dtype=_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
_SCREAMING_SNAKE_CASE =ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_inputs(_a )
_SCREAMING_SNAKE_CASE =ldmad_pipe(**_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output.rgb, output.depth
_SCREAMING_SNAKE_CASE =rgb[0, -3:, -3:, -1].flatten()
_SCREAMING_SNAKE_CASE =rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
_SCREAMING_SNAKE_CASE =np.array(
[0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] )
_SCREAMING_SNAKE_CASE =np.array(
[0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def __UpperCamelCase ( self : Union[str, Any] , _a : int , _a : Dict="cpu" , _a : Optional[Any]=torch.float32 , _a : List[str]=0 ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE =np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
_SCREAMING_SNAKE_CASE =torch.from_numpy(_a ).to(device=_a , dtype=_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_inputs(_a )
_SCREAMING_SNAKE_CASE =ldmad_pipe(**_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output.rgb, output.depth
_SCREAMING_SNAKE_CASE =0.49_55_86
_SCREAMING_SNAKE_CASE =0.33_79_55_15
_SCREAMING_SNAKE_CASE =1_12.4_85_18
_SCREAMING_SNAKE_CASE =98.48_97_46
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_inputs(_a )
_SCREAMING_SNAKE_CASE =ldmad_pipe(**_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output.rgb, output.depth
_SCREAMING_SNAKE_CASE =0.4_19_41_27
_SCREAMING_SNAKE_CASE =0.35_37_55_86
_SCREAMING_SNAKE_CASE =0.5_63_85_02
_SCREAMING_SNAKE_CASE =0.34_68_61_03
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = KandinskyImgaImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self : str , _a : int , _a : int=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _SCREAMING_SNAKE_CASE =Image.fromarray(np.uint8(_a ) ).convert('''RGB''' ).resize((256, 256) )
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE ='''A red cartoon frog, 4k'''
_SCREAMING_SNAKE_CASE =KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(_a )
_SCREAMING_SNAKE_CASE =KandinskyImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.float16 )
_SCREAMING_SNAKE_CASE =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE =pipeline(
_a , image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_a , _a )
import argparse
import os
import re
snake_case_ : List[Any] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
snake_case_ : Dict = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
snake_case_ : Optional[Any] = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
snake_case_ : Optional[int] = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
snake_case_ : Any = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
snake_case_ : Optional[int] = re.compile(R'''\[([^\]]+)\]''')
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =_re_indent.search(a__)
return "" if search is None else search.groups()[0]
def lowerCamelCase( a__ ,a__="" ,a__=None ,a__=None):
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =code.split('''\n''')
if start_prompt is not None:
while not lines[index].startswith(a__):
index += 1
_SCREAMING_SNAKE_CASE =['''\n'''.join(lines[:index])]
else:
_SCREAMING_SNAKE_CASE =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_SCREAMING_SNAKE_CASE =[lines[index]]
index += 1
while index < len(a__) and (end_prompt is None or not lines[index].startswith(a__)):
if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
if len(a__) > 0 and get_indent(current_block[-1]).startswith(indent_level + ''' '''):
current_block.append(lines[index])
blocks.append('''\n'''.join(a__))
if index < len(a__) - 1:
_SCREAMING_SNAKE_CASE =[lines[index + 1]]
index += 1
else:
_SCREAMING_SNAKE_CASE =[]
else:
blocks.append('''\n'''.join(a__))
_SCREAMING_SNAKE_CASE =[lines[index]]
else:
current_block.append(lines[index])
index += 1
# Adds current block if it's nonempty.
if len(a__) > 0:
blocks.append('''\n'''.join(a__))
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a__):
blocks.append('''\n'''.join(lines[index:]))
return blocks
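# Worked example (traced by hand, so treat it as illustrative): splitting
# "a = 1\nif x:\n    y\nb = 2" at indent_level "" yields
# ["a = 1", "if x:\n    y\nb = 2"] -- the dedented line that closes an indented
# run is flushed together with that run.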
def lowerCamelCase( a__):
def _inner(a__):
return key(a__).lower().replace('''_''' ,'''''')
return _inner
def lowerCamelCase( a__ ,a__=None):
# If no key is provided, we use a noop.
def noop(a__):
        return a__
if key is None:
_SCREAMING_SNAKE_CASE =noop
# Constants are all uppercase, they go first.
_SCREAMING_SNAKE_CASE =[obj for obj in objects if key(a__).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_SCREAMING_SNAKE_CASE =[obj for obj in objects if key(a__)[0].isupper() and not key(a__).isupper()]
# Functions begin with a lowercase, they go last.
_SCREAMING_SNAKE_CASE =[obj for obj in objects if not key(a__)[0].isupper()]
_SCREAMING_SNAKE_CASE =ignore_underscore(a__)
return sorted(a__ ,key=a__) + sorted(a__ ,key=a__) + sorted(a__ ,key=a__)
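# Worked example: sort_objects(["beta", "ALPHA", "Gamma", "_delta"]) returns
# ["ALPHA", "Gamma", "beta", "_delta"] -- constants first, then classes, then
# functions, with leading underscores ignored inside each group.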
def lowerCamelCase( a__):
    # This inner function sorts imports between [ ].
def _replace(a__):
_SCREAMING_SNAKE_CASE =match.groups()[0]
if "," not in imports:
return f"[{imports}]"
_SCREAMING_SNAKE_CASE =[part.strip().replace('''"''' ,'''''') for part in imports.split(''',''')]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
_SCREAMING_SNAKE_CASE =keys[:-1]
return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(a__)]) + "]"
_SCREAMING_SNAKE_CASE =import_statement.split('''\n''')
if len(a__) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_SCREAMING_SNAKE_CASE =2 if lines[1].strip() == '''[''' else 1
_SCREAMING_SNAKE_CASE =[(i, _re_strip_line.search(a__).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
_SCREAMING_SNAKE_CASE =sort_objects(a__ ,key=lambda a__: x[1])
_SCREAMING_SNAKE_CASE =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
elif len(a__) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1]) is not None:
_SCREAMING_SNAKE_CASE =_re_bracket_content.sub(_replace ,lines[1])
else:
_SCREAMING_SNAKE_CASE =[part.strip().replace('''"''' ,'''''') for part in lines[1].split(''',''')]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
_SCREAMING_SNAKE_CASE =keys[:-1]
_SCREAMING_SNAKE_CASE =get_indent(lines[1]) + ''', '''.join([f"\"{k}\"" for k in sort_objects(a__)])
return "\n".join(a__)
else:
# Finally we have to deal with imports fitting on one line
_SCREAMING_SNAKE_CASE =_re_bracket_content.sub(_replace ,a__)
return import_statement
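# Worked example for the one-line branch above:
#   sort_objects_in_import('_import_structure["models"] = ["zeta", "Alpha", "BETA"]')
# returns '_import_structure["models"] = ["BETA", "Alpha", "zeta"]' (the single-key
# bracket is left untouched because it contains no comma).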
def lowerCamelCase( a__ ,a__=True):
with open(a__ ,'''r''') as f:
_SCREAMING_SNAKE_CASE =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_SCREAMING_SNAKE_CASE =split_code_in_indented_blocks(
a__ ,start_prompt='''_import_structure = {''' ,end_prompt='''if TYPE_CHECKING:''')
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 ,len(a__) - 1):
# Check if the block contains some `_import_structure`s thingy to sort.
_SCREAMING_SNAKE_CASE =main_blocks[block_idx]
_SCREAMING_SNAKE_CASE =block.split('''\n''')
# Get to the start of the imports.
_SCREAMING_SNAKE_CASE =0
while line_idx < len(a__) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_SCREAMING_SNAKE_CASE =len(a__)
else:
line_idx += 1
if line_idx >= len(a__):
continue
# Ignore beginning and last line: they don't contain anything.
_SCREAMING_SNAKE_CASE ='''\n'''.join(block_lines[line_idx:-1])
_SCREAMING_SNAKE_CASE =get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
_SCREAMING_SNAKE_CASE =split_code_in_indented_blocks(a__ ,indent_level=a__)
# We have two categories of import key: list or _import_structure[key].append/extend
_SCREAMING_SNAKE_CASE =_re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_SCREAMING_SNAKE_CASE =[(pattern.search(a__).groups()[0] if pattern.search(a__) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_SCREAMING_SNAKE_CASE =[(i, key) for i, key in enumerate(a__) if key is not None]
_SCREAMING_SNAKE_CASE =[x[0] for x in sorted(a__ ,key=lambda a__: x[1])]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =[]
for i in range(len(a__)):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i])
else:
_SCREAMING_SNAKE_CASE =sort_objects_in_import(internal_blocks[sorted_indices[count]])
reordered_blocks.append(a__)
count += 1
# And we put our main block back together with its first and last line.
_SCREAMING_SNAKE_CASE ='''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
if code != "\n".join(a__):
if check_only:
return True
else:
print(f"Overwriting {file}.")
with open(a__ ,'''w''') as f:
f.write('''\n'''.join(a__))
def lowerCamelCase( a__=True):
_SCREAMING_SNAKE_CASE =[]
for root, _, files in os.walk(a__):
if "__init__.py" in files:
_SCREAMING_SNAKE_CASE =sort_imports(os.path.join(a__ ,'''__init__.py''') ,check_only=a__)
if result:
_SCREAMING_SNAKE_CASE =[os.path.join(a__ ,'''__init__.py''')]
if len(a__) > 0:
raise ValueError(f"Would overwrite {len(a__)} files, run `make style`.")
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
snake_case_ : Union[str, Any] = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCamelCase ( self : Tuple , _a : Optional[Any]=False , _a : str=False , _a : Dict=False ) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
                    255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
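# Note (added for clarity, not in the original tests): the four-channel variant above
# still expects three output channels because `do_convert_rgb=True` converts RGBA
# inputs to RGB before resizing, cropping and normalization.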
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
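# Worked examples (added for illustration; values verified by hand):
#   set_bit(0b1010, 0)    -> 0b1011 (11)
#   clear_bit(0b1010, 1)  -> 0b1000 (8)
#   flip_bit(0b1010, 3)   -> 0b0010 (2)
#   is_bit_set(0b1010, 1) -> True
#   get_bit(0b1010, 0)    -> 0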
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate AND of the two input values."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
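# Illustrative extension (not part of the original file): the same tuple-counting
# trick yields the other basic gates, e.g. an OR gate fires when any input is 1.
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)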
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Creates a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
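# Quick sanity check (illustrative only, not part of the library module): every beta
# produced by the cosine schedule lies in (0, max_beta], e.g.
#
#   betas = betas_for_alpha_bar(10)
#   assert betas.min() > 0 and betas.max() <= 0.999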
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """Scales the denoising model input, ensuring interchangeability with other schedulers."""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
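# Illustrative usage sketch (not part of this module; `model` below is a hypothetical
# UNet). Assuming the `diffusers` package is installed, the public scheduler is driven
# like any K-diffusion style scheduler:
#
#   from diffusers import KDPM2DiscreteScheduler
#
#   scheduler = KDPM2DiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample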
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test can be timed without download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # check the model learned something
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # The common save/load test does not apply to multistep solvers, which need
        # past residuals; the round trip is covered by check_over_configs instead.
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Build a default scheduler only when none is passed in; rebuilding it
        # unconditionally would silently discard the caller's scheduler (see test_switch).
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
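# Minimal round-trip sketch mirroring what check_over_configs exercises above
# (illustrative; assumes the `diffusers` package is importable):
#
#   import tempfile
#   from diffusers import DPMSolverSinglestepScheduler
#
#   scheduler = DPMSolverSinglestepScheduler()
#   with tempfile.TemporaryDirectory() as tmpdirname:
#       scheduler.save_config(tmpdirname)
#       restored = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname)
#   # `restored` now carries the same configuration as `scheduler`.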
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
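# Illustrative note (not part of the test): `KwargsHandler.to_kwargs()` only returns
# the fields that differ from their dataclass defaults, which is what the MockClass
# assertions above rely on. For example:
#
#   from accelerate import DistributedDataParallelKwargs
#   DistributedDataParallelKwargs(find_unused_parameters=True).to_kwargs()
#   # -> {"find_unused_parameters": True}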
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
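# For reference (illustrative, not part of the test): both schedulers share the same
# forward-diffusion rule, which is why the noisy images can match exactly:
#
#   noisy = sqrt(alpha_bar_t) * clean + sqrt(1 - alpha_bar_t) * noise
#
# A hand-rolled equivalent with torch, given a scheduler's `alphas_cumprod`:
#
#   alpha_bar_t = scheduler.alphas_cumprod[t]
#   noisy = alpha_bar_t**0.5 * clean + (1 - alpha_bar_t) ** 0.5 * noise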
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """For Boruvka's algorithm the weights should be distinct; this makes them so."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) triples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given set of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set (union-find) with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm; returns a minimum spanning tree."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
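# Illustrative usage (not part of the original module): build a small weighted graph
# and extract its minimum spanning tree with Boruvka's algorithm.
if __name__ == "__main__":
    g = Graph.build(edges=[["a", "b", 1], ["b", "c", 2], ["a", "c", 3]])
    mst = Graph.boruvka_mst(g)
    print(mst)  # keeps the two cheapest edges: a-b (1) and b-c (2)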
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    """Return (number of documents containing `term`, total number of documents).

    Documents in `corpus` are separated by newlines.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
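# Worked example (illustrative, not part of the original module):
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog barked\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")  # -> 1
    df, n = document_frequency("cat", corpus)  # -> (2, 3)
    idf = inverse_document_frequency(df, n)    # -> round(log10(3 / 2), 3) = 0.176
    print(tf_idf(tf, idf))                     # -> 0.176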
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 691 | 1 |
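# Illustrative invocation (flag values are assumptions, not from the original script):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-cased \
#       --data_dir ./data/swag \
#       --output_dir ./out \
#       --do_train --do_eval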
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Computes (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using the binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# same check without the helper; Python computes b ** (p - 2) exactly,
# which produces a huge intermediate value:
print((a / b) % p == (a * b ** (p - 2)) % p)
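# Illustrative cross-check (not in the original file): Python's built-in
# three-argument pow performs the same modular exponentiation in O(log n).
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)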
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline: predicts the words that will follow a specified text prompt.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
def __UpperCamelCase ( self : Optional[int] , *_a : int , **_a : Tuple ) -> Dict:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self : Optional[int] , _a : int , **_a : Any ) -> int:
"""simple docstring"""
return super().__call__(_a , **_a )
def __UpperCamelCase ( self : int , _a : Optional[int] , _a : Union[str, Any]="" , _a : Any=None , **_a : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_SCREAMING_SNAKE_CASE =prompt_text
if handle_long_generation == "hole":
_SCREAMING_SNAKE_CASE =inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_SCREAMING_SNAKE_CASE =generate_kwargs['''max_new_tokens''']
else:
_SCREAMING_SNAKE_CASE =generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_SCREAMING_SNAKE_CASE =self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_SCREAMING_SNAKE_CASE =inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_SCREAMING_SNAKE_CASE =inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __UpperCamelCase ( self : List[str] , _a : Union[str, Any] , **_a : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =model_inputs['''input_ids''']
_SCREAMING_SNAKE_CASE =model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =1
else:
_SCREAMING_SNAKE_CASE =input_ids.shape[0]
_SCREAMING_SNAKE_CASE =model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_SCREAMING_SNAKE_CASE =generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_SCREAMING_SNAKE_CASE ='''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_SCREAMING_SNAKE_CASE =generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_SCREAMING_SNAKE_CASE ='''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_SCREAMING_SNAKE_CASE =self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_SCREAMING_SNAKE_CASE =generated_sequence.shape[0]
if self.framework == "pt":
_SCREAMING_SNAKE_CASE =generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_SCREAMING_SNAKE_CASE =tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __UpperCamelCase ( self : Dict , _a : Union[str, Any] , _a : List[Any]=ReturnType.FULL_TEXT , _a : List[str]=True ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =model_outputs['''generated_sequence'''][0]
_SCREAMING_SNAKE_CASE =model_outputs['''input_ids''']
_SCREAMING_SNAKE_CASE =model_outputs['''prompt_text''']
_SCREAMING_SNAKE_CASE =generated_sequence.numpy().tolist()
_SCREAMING_SNAKE_CASE =[]
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_SCREAMING_SNAKE_CASE ={'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_SCREAMING_SNAKE_CASE =self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
_SCREAMING_SNAKE_CASE =prompt_text + text[prompt_length:]
else:
_SCREAMING_SNAKE_CASE =text[prompt_length:]
_SCREAMING_SNAKE_CASE ={'''generated_text''': all_text}
records.append(_a )
return records | 691 |
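# Usage sketch for the text-generation pipeline defined above, via the standard
# `transformers` factory; the "gpt2" checkpoint and the sample prompt are
# illustrative, not prescribed by this file. `max_new_tokens` and
# `return_full_text` are the parameters handled in the preprocess and
# _sanitize_parameters methods above.
# from transformers import pipeline
# generator = pipeline("text-generation", model="gpt2")
# out = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=True)
# print(out[0]["generated_text"])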
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self ) | 691 | 1 |
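# Inference sketch mirroring the slow integration test above; the checkpoint is
# whatever BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] resolves to.
# processor = BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
# model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
# inputs = processor(images=prepare_img(), return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits  # expected shape: (1, 1000)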
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        """simple docstring"""
        self.data = data
        self.next = None

    def __repr__(self):
        """simple docstring"""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    if not elements_list:
        raise Exception('''The Elements List is empty''')
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('''Linked List:''')
    print(linked_list)
    print('''Elements in Reverse:''')
    print_reverse(linked_list)


if __name__ == "__main__":
    main() | 691 |
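# Expected console output of main() above, derived from the code: __repr__ joins
# node data with "->", and print_reverse prints one element per line, deepest
# first (doctest output is empty since no doctests are defined):
# Linked List:
# 14->52->14->12->43
# Elements in Reverse:
# 43
# 12
# 14
# 52
# 14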
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "rag"
UpperCAmelCase = True
def __init__( self : Tuple , _a : List[Any]=None , _a : Tuple=True , _a : Optional[Any]=None , _a : int=None , _a : List[str]=None , _a : int=None , _a : Optional[int]=None , _a : str=" / " , _a : Any=" // " , _a : Optional[Any]=5 , _a : int=300 , _a : Optional[Any]=768 , _a : Any=8 , _a : List[str]="wiki_dpr" , _a : Dict="train" , _a : Union[str, Any]="compressed" , _a : str=None , _a : Union[str, Any]=None , _a : int=False , _a : Any=False , _a : Any=0.0 , _a : Any=True , _a : List[str]=False , _a : Optional[int]=False , _a : int=False , _a : Union[str, Any]=True , _a : Optional[int]=None , **_a : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''question_encoder''' )
_SCREAMING_SNAKE_CASE =question_encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''generator''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =reduce_loss
_SCREAMING_SNAKE_CASE =label_smoothing
_SCREAMING_SNAKE_CASE =exclude_bos_score
_SCREAMING_SNAKE_CASE =do_marginalize
_SCREAMING_SNAKE_CASE =title_sep
_SCREAMING_SNAKE_CASE =doc_sep
_SCREAMING_SNAKE_CASE =n_docs
_SCREAMING_SNAKE_CASE =max_combined_length
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =dataset_split
_SCREAMING_SNAKE_CASE =index_name
_SCREAMING_SNAKE_CASE =retrieval_vector_size
_SCREAMING_SNAKE_CASE =retrieval_batch_size
_SCREAMING_SNAKE_CASE =passages_path
_SCREAMING_SNAKE_CASE =index_path
_SCREAMING_SNAKE_CASE =use_dummy_dataset
_SCREAMING_SNAKE_CASE =output_retrieved
_SCREAMING_SNAKE_CASE =do_deduplication
_SCREAMING_SNAKE_CASE =use_cache
if self.forced_eos_token_id is None:
_SCREAMING_SNAKE_CASE =getattr(self.generator , '''forced_eos_token_id''' , _a )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : Dict ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.question_encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.generator.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output | 691 | 1 |
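# Construction sketch for this config (public name `RagConfig`, per the docstring):
# the assert in __init__ above requires both sub-configs to be passed as dicts
# under `question_encoder` and `generator`; the sub-model checkpoints below are
# illustrative, not prescribed by this file.
# from transformers import AutoConfig
# qe_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# gen_config = AutoConfig.from_pretrained("facebook/bart-large")
# rag_config = RagConfig(question_encoder=qe_config.to_dict(), generator=gen_config.to_dict(), n_docs=5)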
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def __UpperCamelCase ( self : List[Any] , _a : List[Any]=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 128, 128) , rng=random.Random(_a ) )
_SCREAMING_SNAKE_CASE =np.random.RandomState(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_SCREAMING_SNAKE_CASE =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_SCREAMING_SNAKE_CASE =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
# warmup pass to apply optimizations
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs() )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_SCREAMING_SNAKE_CASE =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_SCREAMING_SNAKE_CASE =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_SCREAMING_SNAKE_CASE =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ort.SessionOptions()
_SCREAMING_SNAKE_CASE =False
return options
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_SCREAMING_SNAKE_CASE =init_image.resize((768, 512) )
# using the PNDM scheduler by default
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE ='''A fantasy landscape, trending on artstation'''
_SCREAMING_SNAKE_CASE =np.random.RandomState(0 )
_SCREAMING_SNAKE_CASE =pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_SCREAMING_SNAKE_CASE =np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_SCREAMING_SNAKE_CASE =init_image.resize((768, 512) )
_SCREAMING_SNAKE_CASE =LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE ='''A fantasy landscape, trending on artstation'''
_SCREAMING_SNAKE_CASE =np.random.RandomState(0 )
_SCREAMING_SNAKE_CASE =pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_a , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_SCREAMING_SNAKE_CASE =np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 | 691 |
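# The scheduler-swap pattern the tests above exercise repeatedly, in isolation;
# the checkpoint and provider strings are the same illustrative values the
# tests use.
# pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider")
# pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)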
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait() | 691 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = ["input_features", "attention_mask"]
def __init__( self : List[Any] , _a : Any=80 , _a : Any=1_6000 , _a : Union[str, Any]=80 , _a : Optional[int]=0.0 , _a : List[Any]=True , _a : Tuple=True , _a : Tuple=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(feature_size=_a , sampling_rate=_a , padding_value=_a , **_a )
_SCREAMING_SNAKE_CASE =num_mel_bins
_SCREAMING_SNAKE_CASE =do_ceptral_normalize
_SCREAMING_SNAKE_CASE =normalize_means
_SCREAMING_SNAKE_CASE =normalize_vars
_SCREAMING_SNAKE_CASE =True
def __UpperCamelCase ( self : Any , _a : np.ndarray , ) -> np.ndarray:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =waveform * (2**15) # Kaldi compliance: 16-bit signed integers
_SCREAMING_SNAKE_CASE =torch.from_numpy(_a ).unsqueeze(0 )
_SCREAMING_SNAKE_CASE =ta_kaldi.fbank(_a , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __UpperCamelCase ( _a : np.ndarray , _a : int , _a : Optional[bool] = True , _a : Optional[bool] = True , _a : float = 0.0 , ) -> np.ndarray:
"""simple docstring"""
if normalize_means:
_SCREAMING_SNAKE_CASE =x[:input_length].mean(axis=0 )
_SCREAMING_SNAKE_CASE =np.subtract(_a , _a )
if normalize_vars:
_SCREAMING_SNAKE_CASE =x[:input_length].std(axis=0 )
_SCREAMING_SNAKE_CASE =np.divide(_a , _a )
if input_length < x.shape[0]:
_SCREAMING_SNAKE_CASE =padding_value
# make sure array is in float32
_SCREAMING_SNAKE_CASE =x.astype(np.floataa )
return x
def __UpperCamelCase ( self : Optional[int] , _a : List[np.ndarray] , _a : Optional[np.ndarray] = None ) -> List[np.ndarray]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(_a , _a , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(_a , _a )
]
def __call__( self : List[Any] , _a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _a : Union[bool, str, PaddingStrategy] = False , _a : Optional[int] = None , _a : bool = False , _a : Optional[int] = None , _a : Optional[Union[str, TensorType]] = None , _a : Optional[int] = None , _a : Optional[bool] = None , **_a : Tuple , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
_SCREAMING_SNAKE_CASE =isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
_SCREAMING_SNAKE_CASE =is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_SCREAMING_SNAKE_CASE =[np.asarray(_a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
_SCREAMING_SNAKE_CASE =np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_SCREAMING_SNAKE_CASE =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_SCREAMING_SNAKE_CASE =[raw_speech]
# extract fbank features
_SCREAMING_SNAKE_CASE =[self._extract_fbank_features(_a ) for waveform in raw_speech]
# convert into correct format for padding
_SCREAMING_SNAKE_CASE =BatchFeature({'''input_features''': features} )
_SCREAMING_SNAKE_CASE =self.pad(
_a , padding=_a , max_length=_a , truncation=_a , pad_to_multiple_of=_a , return_attention_mask=_a , **_a , )
# make sure list is in array format
_SCREAMING_SNAKE_CASE =padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , _a ):
_SCREAMING_SNAKE_CASE =[np.asarray(_a , dtype=np.floataa ) for feature in input_features]
_SCREAMING_SNAKE_CASE =padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
_SCREAMING_SNAKE_CASE =[np.asarray(_a , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_SCREAMING_SNAKE_CASE =(
np.array(_a , dtype=np.intaa )
if self._get_padding_strategies(_a , max_length=_a ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_SCREAMING_SNAKE_CASE =self.normalize(
padded_inputs['''input_features'''] , attention_mask=_a )
if return_tensors is not None:
_SCREAMING_SNAKE_CASE =padded_inputs.convert_to_tensors(_a )
return padded_inputs | 691 |
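# Toy numpy sketch of the utterance-level CMVN applied above: per-mel-bin mean
# subtraction and variance normalization over the unpadded frames. Array shape
# and values are illustrative.
import numpy as np
toy_feats = np.random.randn(100, 80).astype(np.float32)  # (frames, num_mel_bins)
toy_feats_cmvn = np.divide(np.subtract(toy_feats, toy_feats.mean(axis=0)), toy_feats.std(axis=0))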
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 691 | 1 |
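# Standalone sketch of the conversation flattening implemented above: user turns
# are space-prefixed, all turns are joined with spaces, and the encoded ids are
# truncated from the left when they exceed model_max_length. This helper is
# illustrative, not part of the tokenizer API.
def flatten_conversation(turns, encode, model_max_length):
    # turns: list of (is_user, text) pairs; encode: callable returning token ids
    texts = [(" " + text) if is_user else text for is_user, text in turns]
    ids = encode(" ".join(texts))
    return ids[-model_max_length:] if len(ids) > model_max_length else ids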
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class A__ ( nn.Module ):
UpperCAmelCase = 42
UpperCAmelCase = jnp.floataa
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =FlaxCLIPVisionModule(self.config.vision_config )
_SCREAMING_SNAKE_CASE =nn.Dense(self.config.projection_dim , use_bias=_a , dtype=self.dtype )
_SCREAMING_SNAKE_CASE =self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
_SCREAMING_SNAKE_CASE =self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
_SCREAMING_SNAKE_CASE =self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
_SCREAMING_SNAKE_CASE =self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__( self : Optional[Any] , _a : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.vision_model(_a )[1]
_SCREAMING_SNAKE_CASE =self.visual_projection(_a )
_SCREAMING_SNAKE_CASE =jax_cosine_distance(_a , self.special_care_embeds )
_SCREAMING_SNAKE_CASE =jax_cosine_distance(_a , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
_SCREAMING_SNAKE_CASE =0.0
_SCREAMING_SNAKE_CASE =special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
_SCREAMING_SNAKE_CASE =jnp.round(_a , 3 )
_SCREAMING_SNAKE_CASE =jnp.any(special_scores > 0 , axis=1 , keepdims=_a )
# Use a lower threshold if an image has any special care concept
_SCREAMING_SNAKE_CASE =is_special_care * 0.01
_SCREAMING_SNAKE_CASE =cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
_SCREAMING_SNAKE_CASE =jnp.round(_a , 3 )
_SCREAMING_SNAKE_CASE =jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = CLIPConfig
UpperCAmelCase = "clip_input"
UpperCAmelCase = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Tuple , _a : CLIPConfig , _a : Optional[Tuple] = None , _a : int = 0 , _a : jnp.dtype = jnp.floataa , _a : bool = True , **_a : Any , ) -> str:
"""simple docstring"""
if input_shape is None:
_SCREAMING_SNAKE_CASE =(1, 224, 224, 3)
_SCREAMING_SNAKE_CASE =self.module_class(config=_a , dtype=_a , **_a )
super().__init__(_a , _a , input_shape=_a , seed=_a , dtype=_a , _do_init=_do_init )
def __UpperCamelCase ( self : List[Any] , _a : jax.random.KeyArray , _a : Tuple , _a : FrozenDict = None ) -> FrozenDict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =jax.random.normal(_a , _a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =jax.random.split(_a )
_SCREAMING_SNAKE_CASE ={'''params''': params_rng, '''dropout''': dropout_rng}
_SCREAMING_SNAKE_CASE =self.module.init(_a , _a )['''params''']
return random_params
def __call__( self : str , _a : Union[str, Any] , _a : dict = None , ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =jnp.transpose(_a , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(_a , dtype=jnp.floataa ) , rngs={} , ) | 691 |
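# Toy numpy sketch of the concept thresholding in the safety checker above
# (distance values are illustrative): a score is
# round(cos_dist - per_concept_weight + adjustment), and any positive score
# flags the image.
import numpy as np
toy_cos_dist = np.array([[0.18, 0.25, 0.10]])
toy_weights = np.array([0.20, 0.20, 0.20])
toy_special_adjustment = 0.01  # applied when a special-care concept fired
toy_has_nsfw = np.any(np.round(toy_cos_dist - toy_weights[None, :] + toy_special_adjustment, 3) > 0, axis=1)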
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list with one PIL image (random noise, channels-first array moved to channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
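# --- Usage sketch (illustrative, not part of the test suite) ------------------
# How the processor under test is used in practice. The checkpoint name and
# image URL are assumptions for illustration; any Chinese-CLIP checkpoint with
# a saved processor config works the same way.
def example_chinese_clip_processor():
    import requests
    from PIL import Image
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    # Text and images are batched together into a single BatchEncoding.
    inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt", padding=True)
    print(list(inputs.keys()))  # ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values']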
from datetime import datetime
import requests
def download_video(url):
    """Resolve the direct video URL via downloadgram and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    content = _SCREAMING_SNAKE_CASE  # the README body assembled in the f-string above
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(content)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
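# --- Quick smoke test (illustrative) --------------------------------------------
# Writes one card into a temporary directory instead of the repository tree;
# the model name is one of the three handled above.
def example_write_model_card():
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        card_dir = Path(tmp) / "allenai" / "wmt16-en-de-dist-12-1"
        write_model_card(card_dir, src_lang="en", tgt_lang="de", model_name="wmt16-en-de-dist-12-1")
        print((card_dir / "README.md").read_text(encoding="utf-8")[:200])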
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 1_2312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
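# --- Companion server (illustrative) --------------------------------------------
# A minimal counterpart the client above can connect to. The file name, host
# and port mirror the client's assumptions; none of this is prescribed by the
# original script.
def serve_file(path="File_to_send", host="", port=1_2312):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.bind((host, port))
        server.listen(1)
        conn, addr = server.accept()
        with conn:
            print(f"Connected by {addr}")
            conn.recv(1024)  # consume the client's greeting
            with open(path, "rb") as in_file:
                while chunk := in_file.read(1024):
                    conn.sendall(chunk)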
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
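# --- How lazy module loading works (illustrative sketch) -------------------------
# _LazyModule swaps itself into sys.modules so that tokenization_tapex is only
# imported the first time TapexTokenizer is accessed. A minimal standalone
# sketch of the same idea using PEP 562's module-level __getattr__ — not the
# actual transformers implementation:
def make_lazy_getattr(import_structure, package):
    import importlib

    attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(name):
        if name in attr_to_module:
            module = importlib.import_module("." + attr_to_module[name], package)
            return getattr(module, name)
        raise AttributeError(f"module {package!r} has no attribute {name!r}")

    return __getattr__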
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url="https://www.worldometers.info/coronavirus"):
    """Scrape worldometers.info and return a mapping of headline labels to values."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f"{key}\n{value}\n")
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes a function return its wall-clock run time in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
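# --- Usage sketch (illustrative) --------------------------------------------------
# Generates a small dummy dataset and times a full pass over it. The feature
# spec and file name are arbitrary choices for the example.
def example_benchmark():
    import os
    import tempfile

    features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})

    @get_duration
    def iterate(dataset):
        for _ in dataset:
            pass

    with tempfile.TemporaryDirectory() as tmp:
        dataset = generate_example_dataset(os.path.join(tmp, "dataset.arrow"), features, num_examples=1_000)
        print(f"Full iteration took {iterate(dataset):.4f}s")  # get_duration makes iterate() return seconds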
def kth_permutation(k, n):
    """Return the k-th (0-indexed, lexicographic) permutation of range(n) via the factorial number system."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
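# --- Worked example (illustrative) ------------------------------------------------
# For n=4 the factorial bases are [1, 2, 6]. Decomposing k=10:
#   divmod(10, 6) = (1, 4) -> take elements[1] = 1, elements become [0, 2, 3]
#   divmod(4, 2)  = (2, 0) -> take elements[2] = 3, elements become [0, 2]
#   divmod(0, 1)  = (0, 0) -> take elements[0] = 0, then append the last element 2
def example_kth_permutation():
    assert kth_permutation(10, 4) == [1, 3, 0, 2]
    # k=0 is the identity permutation.
    assert kth_permutation(0, 4) == [0, 1, 2, 3]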
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break

            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
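# --- Usage sketch (illustrative) ----------------------------------------------
# Patience-based early exit at inference time. The checkpoint path is a
# placeholder: it stands for any classifier fine-tuned and saved with this
# class's save_pretrained.
def example_pabee_inference():
    from transformers import BertTokenizer

    model = BertForSequenceClassificationWithPabee.from_pretrained("path/to/pabee-checkpoint")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    model.bert.set_patience(3)  # stop once 3 consecutive layers agree on the label
    model.bert.reset_stats()

    inputs = tokenizer("PABEE trades a little accuracy for speed.", return_tensors="pt")
    model.eval()
    with torch.no_grad():
        logits = model(**inputs)[0]
    print(logits.argmax(dim=-1))
    model.bert.log_stats()  # reports the average number of layers actually run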
from manim import *
class Stage3(Scene):  # a manim Scene; the class name follows accelerate's big-model-inference animations
    def construct(self):
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
        self.wait()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        # Score every candidate (start, end) span, then keep the top non-overlapping ones.
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single PIL image (random noise, channels-first array moved to channels-last)."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
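# --- Usage sketch (illustrative) ----------------------------------------------
# Scene-text recognition with a real checkpoint; the processor decodes the
# character, BPE and wordpiece heads jointly. Checkpoint name and image URL are
# assumptions for illustration.
def example_mgpstr():
    import requests
    from transformers import MgpstrForSceneTextRecognition

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

    url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    outputs = model(pixel_values)
    generated_text = processor.batch_decode(outputs.logits)["generated_text"]
    print(generated_text)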
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class A__ ( unittest.TestCase ):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BatchSampler(range(24 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_a , _a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(24 ) , batch_size=3 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_SCREAMING_SNAKE_CASE =BatchSampler(range(21 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_a , _a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(21 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE =BatchSampler(range(22 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_a , _a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(22 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE =BatchSampler(range(20 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_a , _a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(20 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE =BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_a , _a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[[], []]
self.check_batch_sampler_shards(_a , _a )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BatchSampler(range(24 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(24 ) , batch_size=4 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size.
_SCREAMING_SNAKE_CASE =BatchSampler(range(22 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(22 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_SCREAMING_SNAKE_CASE =BatchSampler(range(21 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(21 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE =BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[[], []]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BatchSampler(range(24 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(24 ) , batch_size=3 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_SCREAMING_SNAKE_CASE =BatchSampler(range(21 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(21 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE =BatchSampler(range(22 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(22 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE =BatchSampler(range(20 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(20 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE =BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[[[0, 1]], []]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[[], []]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BatchSampler(range(24 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(24 ) , batch_size=4 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size.
_SCREAMING_SNAKE_CASE =BatchSampler(range(22 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(22 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_SCREAMING_SNAKE_CASE =BatchSampler(range(21 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(21 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE =BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[[[0, 1]], []]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_SCREAMING_SNAKE_CASE =BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_SCREAMING_SNAKE_CASE =[[], []]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3) | 691 |
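# A minimal sketch of the sharding pattern exercised by the tests above (this
# assumes accelerate's BatchSamplerShard API as used there): each process wraps
# the same PyTorch BatchSampler and iterates only over its own shard of batches.
import torch
from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

batch_sampler = BatchSampler(range(10), batch_size=2, drop_last=False)
# Positional arguments: (batch_sampler, num_processes, process_index).
shards = [BatchSamplerShard(batch_sampler, 2, i) for i in range(2)]
for i, shard in enumerate(shards):
    # Each shard yields the batches assigned to process i; by default the
    # shards are padded so every process sees the same number of batches.
    print(i, list(shard))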
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats(url="https://www.worldometers.info/coronavirus"):
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""") | 691 | 1 |
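# A hedged usage sketch for the scraper above: the page layout and availability
# are outside our control, so a defensive caller tolerates failures instead of
# crashing.
def print_world_stats():
    try:
        stats = world_covidaa_stats()
    except Exception as err:  # network errors, layout changes, etc.
        print(f"Could not fetch stats: {err}")
        return
    for key, value in stats.items():
        print(f"{key}: {value}")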
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out | 691 |
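# A minimal usage sketch, assuming the adaptive-softmax class defined above (it
# mirrors the Transformer-XL criterion): compute per-position negative
# log-likelihoods for deterministic labels so every target falls in the head
# cluster.
import torch

if __name__ == "__main__":
    n_token, d_embed, d_proj = 1000, 32, 32
    crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj, cutoffs=[100, 500])
    hidden = torch.randn(2, 8, d_proj)           # (batch, seq_len, d_proj)
    labels = torch.arange(2 * 8).view(2, 8)      # token ids 0..15, all < cutoffs[0]
    nll = crit(hidden, labels)                   # flat tensor of per-position losses
    print(nll.shape, nll.mean().item())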
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 | 1 |
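# A quick demonstration of the bit helpers above on the value 0b1010 (10).
if __name__ == "__main__":
    x = 0b1010
    print(bin(set_bit(x, 0)))    # 0b1011
    print(bin(clear_bit(x, 1)))  # 0b1000
    print(bin(flip_bit(x, 3)))   # 0b10
    print(is_bit_set(x, 1))      # True
    print(get_bit(x, 2))         # 0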
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError() | 691 |
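# A hedged sketch of a concrete subcommand built on the abstract base above;
# "EchoCommand" and its flag are purely illustrative, not part of the original.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--text", default="hello")

    def __init__(self, text="hello"):
        self.text = text

    def run(self):
        print(self.text)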
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset(self):
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
    def get_dummy_legacy_index_retriever(self):
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve(self):
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve(self):
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_index_retriever_save_and_from_pretrained(self):
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_canonical_hf_index_retriever_call(self):
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_retriever_call_with_ctx_encoder(self):
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary. | 691 | 1 |
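# A hedged, self-contained sketch of the pattern these tests exercise: a
# datasets.Dataset with a faiss index over an "embeddings" column, queried with
# inner-product search (dimensions and values are illustrative only).
import numpy as np
from datasets import Dataset

import faiss

dim = 8
dataset = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "text": ["foo", "bar"],
        "embeddings": [np.ones(dim), 2 * np.ones(dim)],
    }
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
scores, examples = dataset.get_nearest_examples("embeddings", np.ones(dim, dtype=np.float32), k=1)
print(examples["id"])  # the all-twos row maximizes the inner product -> ['1']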
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience
    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0
    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output
        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs | 691 |
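# A minimal, framework-free sketch of the patience-based early exit ("PABEE")
# idea implemented above: stop running layers once the per-layer prediction has
# been stable for `patience` consecutive layers.
import torch

def early_exit_predict(layer_logits, patience=2):
    """layer_logits: list of (batch, num_labels) tensors, one per layer."""
    patient_counter, last_pred, depth = 0, None, 0
    for depth, logits in enumerate(layer_logits, start=1):
        pred = logits.argmax(dim=1)
        if last_pred is not None and torch.equal(pred, last_pred):
            patient_counter += 1
        else:
            patient_counter = 0
        last_pred = pred
        if patient_counter == patience:
            break
    return last_pred, depth  # prediction and number of layers actually used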
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image) | 691 | 1 |
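# A hedged note on the reproducibility pattern used above: diffusers pipelines
# take an explicit torch.Generator so that a fixed seed yields the same latents
# on repeated runs (a CPU generator is commonly used even for GPU inference).
import torch

generator = torch.Generator(device="cpu").manual_seed(0)
first = torch.randn((1, 4), generator=generator)
generator.manual_seed(0)
second = torch.randn((1, 4), generator=generator)
assert torch.equal(first, second)  # same seed, same noise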
def actual_power(a: int, b: int) -> int:
    """Fast exponentiation for a non-negative exponent b."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    if b < 0:
        # Delegate to the non-negative case and invert the result.
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3)) | 691 |
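# Quick sanity checks for power() against Python's built-in ** operator.
if __name__ == "__main__":
    for base, exp in [(2, 10), (3, 0), (-2, -3), (5, -1)]:
        assert abs(power(base, exp) - base**exp) < 1e-9, (base, exp)
    print("all power() checks passed")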
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        ) | 691 | 1 |
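# A small self-contained illustration of the channel-layout handling the
# tester's prepare_inputs relies on: numpy arrays are generated channels-first
# (C, H, W) and moved to channels-last (H, W, C) before building PIL images.
import numpy as np
from PIL import Image

chw = np.random.randint(255, size=(3, 18, 18), dtype=np.uint8)  # (C, H, W)
hwc = np.moveaxis(chw, 0, -1)                                   # (H, W, C)
pil_image = Image.fromarray(hwc)
print(pil_image.size, pil_image.mode)  # (18, 18) RGB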
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
snake_case_ : List[str] = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 691 |
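# A hedged sketch of the deprecation-shim pattern used above: the old name keeps
# working but emits a FutureWarning pointing at its replacement (class names
# here are illustrative).
import warnings

class NewProcessor:
    pass

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldProcessor is deprecated, use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)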
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 691 | 1 |
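# Printing the full truth table for the gate defined above.
if __name__ == "__main__":
    print("A B | A AND B")
    for a in (0, 1):
        for b in (0, 1):
            print(f"{a} {b} | {and_gate(a, b)}")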
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 |
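# Example usage of the two helpers above.
if __name__ == "__main__":
    sample = [2, 4, 6, 8]
    print(is_arithmetic_series(sample))  # True (common difference 2)
    print(arithmetic_mean(sample))       # 5.0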
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case_ : Optional[int] = '''sshleifer/mar_enro_6_3_student'''
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        # Warm up the cache so the timed test below does not include download time.
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 | 1 |
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    # Difference between the hypothesis value and the actual output for one example.
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    # Linear hypothesis: parameter_vector[0] is the bias term.
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
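
# A hedged sanity-check sketch (not part of the original script): for this linear
# hypothesis, an ordinary least-squares fit gives a closed-form baseline to compare
# the learned parameter_vector against. It reuses train_data and numpy from above;
# the names X, y, and baseline_params are illustrative.
import numpy

X = numpy.array([[1, *features] for features, _ in train_data], dtype=float)  # prepend bias column
y = numpy.array([target for _, target in train_data], dtype=float)
baseline_params, *_ = numpy.linalg.lstsq(X, y, rcond=None)
print("Least-squares baseline:", baseline_params)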
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
class Graph:
    # Adjacency-list graph with Boruvka's algorithm for the minimum spanning tree.

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        # Adds a vertex to the graph.
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        # Adds an undirected, weighted edge to the graph.
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        # Makes all edge weights distinct so the minimum spanning tree is unique.
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        # Returns all edges as (tail, head, weight) tuples.
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        # Builds a graph from the given vertices and (head, tail, weight) edges.
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        # Disjoint-set structure with path compression and union by rank.

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            # Equal ranks: pick root1 as the new root and bump its rank.
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1

    @staticmethod
    def boruvka_mst(graph):
        # Returns the minimum spanning tree of the graph using Boruvka's algorithm.
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
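
# A small usage sketch (illustrative, not part of the original module): build a
# triangle graph and extract its minimum spanning tree.
g = Graph.build(vertices=[0, 1, 2], edges=[(0, 1, 1), (1, 2, 2), (0, 2, 3)])
mst = Graph.boruvka_mst(g)
print(mst)
# Expected: the two lightest edges (0-1 and 1-2) form the MST.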
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    # Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    # Arguments pertaining to what data we are going to input our model for training and eval.

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()