| code (string, 87 – 55.2k chars) | code_codestyle (int64, 0 – 349) | style_context (string, 135 – 49.1k chars) | style_context_codestyle (int64, 0 – 349) | label (int64, 0 – 1) |
---|---|---|---|---|
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , **UpperCAmelCase_ : str ):
A__ = AutoConfig.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
A__ = AutoModelForSeqaSeqLM.from_config(UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
AutoTokenizer.from_pretrained(UpperCAmelCase_ ).save_pretrained(UpperCAmelCase_ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
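# Hedged usage sketch (the script file name and the config name are illustrative,
# not taken from the original file):
#
#   python save_randomly_initialized_model.py t5-small /tmp/t5-random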
| 335 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
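# Hedged usage sketch: with the lazy module installed, the symbols declared in
# `_import_structure` resolve on first access, e.g.
#
#   from transformers import PegasusXConfig, PegasusXForConditionalGeneration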
| 335 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to unicode strings, avoiding whitespace/control characters."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (the word is a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
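# Hedged sanity check for the two helpers above (illustrative, not from the original file):
#
#   table = bytes_to_unicode()
#   assert len(table) == 256 and table[ord("A")] == "A"
#   assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}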
class LEDTokenizer(PreTrainedTokenizer):
    """Constructs a LED tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
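# Hedged usage sketch for the LED-specific `_pad` above (the checkpoint name is a
# real LED model; the rest is illustrative):
#
#   tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   enc = tok("a long document", padding="max_length", max_length=16)
#   # if `enc` carries a "global_attention_mask", `_pad` extends it with -1, because
#   # 0 already means "local attention" rather than "do not attend"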
| 335 |
"""simple docstring"""
import math
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
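# With the edges above, the expected shortest distances are:
#   graph.show_min(1, 4) == 11   # 1 -> 3 (5) -> 4 (6)
#   graph.show_min(0, 3) == 16   # 0 -> 2 (9) -> 3 (7)
# Note the demo discards the return values; wrapping them in print() makes the check visible.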
| 335 | 1 |
"""simple docstring"""
import argparse
import os
import re
SCREAMING_SNAKE_CASE_ : List[Any] = 'src/diffusers'
# Pattern that looks at the indentation in a line.
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
SCREAMING_SNAKE_CASE_ : Dict = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
SCREAMING_SNAKE_CASE_ : Dict = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
SCREAMING_SNAKE_CASE_ : Optional[Any] = re.compile(r'\[([^\]]+)\]')
def _snake_case ( UpperCAmelCase_ : Dict ):
A__ = _re_indent.search(UpperCAmelCase_ )
return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given `indent_level`, optionally between `start_prompt` and `end_prompt`."""
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` function (mapping an object to a string) to lowercase and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with the imported objects sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
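# Hedged example of `sort_objects_in_import` on a one-line import (input is illustrative;
# constants sort first, then classes, then lowercase names):
#
#   sort_objects_in_import('_import_structure["models"] = ["ZModel", "AConfig", "b_util"]')
#   # -> '_import_structure["models"] = ["AConfig", "ZModel", "b_util"]'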
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` blocks in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                reordered_blocks.append(sort_objects_in_import(internal_blocks[sorted_indices[count]]))
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
SCREAMING_SNAKE_CASE_ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
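# Hedged usage sketch (this mirrors how such style scripts are typically wired into
# `make quality` / `make style`; the path is illustrative):
#
#   python utils/custom_init_isort.py --check_only   # report files that would change
#   python utils/custom_init_isort.py                # rewrite them in place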
| 335 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 335 | 1 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Tuple , UpperCamelCase: UNetaDModel , UpperCamelCase: UNetaDModel , UpperCamelCase: DDPMScheduler , UpperCamelCase: Tuple , ):
"""simple docstring"""
super().__init__()
A__ = value_function
A__ = unet
A__ = scheduler
A__ = env
A__ = env.get_dataset()
A__ = {}
for key in self.data.keys():
try:
A__ = self.data[key].mean()
except: # noqa: E722
pass
A__ = {}
for key in self.data.keys():
try:
A__ = self.data[key].std()
except: # noqa: E722
pass
A__ = env.observation_space.shape[0]
A__ = env.action_space.shape[0]
def UpperCamelCase ( self: List[str] , UpperCamelCase: Dict , UpperCamelCase: Dict ):
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def UpperCamelCase ( self: str , UpperCamelCase: Any ):
"""simple docstring"""
if type(UpperCamelCase ) is dict:
return {k: self.to_torch(UpperCamelCase ) for k, v in x_in.items()}
elif torch.is_tensor(UpperCamelCase ):
return x_in.to(self.unet.device )
return torch.tensor(UpperCamelCase , device=self.unet.device )
def UpperCamelCase ( self: Dict , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] ):
"""simple docstring"""
for key, val in cond.items():
A__ = val.clone()
return x_in
def UpperCamelCase ( self: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: int ):
"""simple docstring"""
A__ = x.shape[0]
A__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
A__ = torch.full((batch_size,) , UpperCamelCase , device=self.unet.device , dtype=torch.long )
for _ in range(UpperCamelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
A__ = self.value_function(x.permute(0 , 2 , 1 ) , UpperCamelCase ).sample
A__ = torch.autograd.grad([y.sum()] , [x] )[0]
A__ = self.scheduler._get_variance(UpperCamelCase )
A__ = torch.exp(0.5 * posterior_variance )
A__ = model_std * grad
A__ = 0
A__ = x.detach()
A__ = x + scale * grad
A__ = self.reset_xa(UpperCamelCase , UpperCamelCase , self.action_dim )
A__ = self.unet(x.permute(0 , 2 , 1 ) , UpperCamelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
A__ = self.scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , predict_epsilon=UpperCamelCase )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
A__ = self.reset_xa(UpperCamelCase , UpperCamelCase , self.action_dim )
A__ = self.to_torch(UpperCamelCase )
return x, y
def __call__( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any]=64 , UpperCamelCase: Optional[Any]=32 , UpperCamelCase: List[Any]=2 , UpperCamelCase: Tuple=0.1 ):
"""simple docstring"""
A__ = self.normalize(UpperCamelCase , """observations""" )
A__ = obs[None].repeat(UpperCamelCase , axis=0 )
A__ = {0: self.to_torch(UpperCamelCase )}
A__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
A__ = randn_tensor(UpperCamelCase , device=self.unet.device )
A__ = self.reset_xa(UpperCamelCase , UpperCamelCase , self.action_dim )
A__ = self.to_torch(UpperCamelCase )
# run the diffusion process
A__ , A__ = self.run_diffusion(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# sort output trajectories by value
A__ = y.argsort(0 , descending=UpperCamelCase ).squeeze()
A__ = x[sorted_idx]
A__ = sorted_values[:, :, : self.action_dim]
A__ = actions.detach().cpu().numpy()
A__ = self.de_normalize(UpperCamelCase , key="""actions""" )
# select the action with the highest value
if y is not None:
A__ = 0
else:
# if we didn't run value guiding, select a random action
A__ = np.random.randint(0 , UpperCamelCase )
A__ = denorm_actions[selected_index, 0]
return denorm_actions
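# Hedged usage sketch (assumes a d4rl-style environment and pretrained value/unet
# checkpoints; the checkpoint name and env below are illustrative):
#
#   pipeline = ValueGuidedRLPipeline.from_pretrained(
#       "bglick13/hopper-medium-v2-value-function-hor32", env=env
#   )
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32)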
| 335 |
"""simple docstring"""
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2)
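# Hedged examples (illustrative values, not from the original file):
#   malus_law(100.0, 60.0) -> 25.0   (cos 60° = 0.5, squared = 0.25)
#   malus_law(100.0, 0.0)  -> 100.0  (aligned polarizers transmit everything)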
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 335 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = OpenAIGPTTokenizer
UpperCAmelCase = OpenAIGPTTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = False
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(UpperCamelCase ) )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Any ):
"""simple docstring"""
return "lower newer", "lower newer"
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
A__ = """lower"""
A__ = ["""low""", """er</w>"""]
A__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
A__ = tokens + ["""<unk>"""]
A__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: int=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
# Simple input
A__ = """This is a simple input"""
A__ = ["""This is a simple input 1""", """This is a simple input 2"""]
A__ = ("""This is a simple input""", """This is a pair""")
A__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" , )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class a ( _lowerCamelCase ):
"""simple docstring"""
pass
| 335 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 335 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
SCREAMING_SNAKE_CASE_ : int = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
SCREAMING_SNAKE_CASE_ : str = {
'ctrl': 2_5_6,
}
CONTROL_CODES = {
'Pregnancy': 1_6_8_6_2_9,
'Christianity': 7_6_7_5,
'Explain': 1_0_6_4_2_3,
'Fitness': 6_3_4_4_0,
'Saving': 6_3_1_6_3,
'Ask': 2_7_1_7_1,
'Ass': 9_5_9_8_5,
'Joke': 1_6_3_5_0_9,
'Questions': 4_5_6_2_2,
'Thoughts': 4_9_6_0_5,
'Retail': 5_2_3_4_2,
'Feminism': 1_6_4_3_3_8,
'Writing': 1_1_9_9_2,
'Atheism': 1_9_2_2_6_3,
'Netflix': 4_8_6_1_6,
'Computing': 3_9_6_3_9,
'Opinion': 4_3_2_1_3,
'Alone': 4_4_9_6_7,
'Funny': 5_8_9_1_7,
'Gaming': 4_0_3_5_8,
'Human': 4_0_8_8,
'India': 1_3_3_1,
'Joker': 7_7_1_3_8,
'Diet': 3_6_2_0_6,
'Legal': 1_1_8_5_9,
'Norman': 4_9_3_9,
'Tip': 7_2_6_8_9,
'Weight': 5_2_3_4_3,
'Movies': 4_6_2_7_3,
'Running': 2_3_4_2_5,
'Science': 2_0_9_0,
'Horror': 3_7_7_9_3,
'Confession': 6_0_5_7_2,
'Finance': 1_2_2_5_0,
'Politics': 1_6_3_6_0,
'Scary': 1_9_1_9_8_5,
'Support': 1_2_6_5_4,
'Technologies': 3_2_5_1_6,
'Teenage': 6_6_1_6_0,
'Event': 3_2_7_6_9,
'Learned': 6_7_4_6_0,
'Notion': 1_8_2_7_7_0,
'Wikipedia': 3_7_5_8_3,
'Books': 6_6_6_5,
'Extract': 7_6_0_5_0,
'Confessions': 1_0_2_7_0_1,
'Conspiracy': 7_5_9_3_2,
'Links': 6_3_6_7_4,
'Narcissus': 1_5_0_4_2_5,
'Relationship': 5_4_7_6_6,
'Relationships': 1_3_4_7_9_6,
'Reviews': 4_1_6_7_1,
'News': 4_2_5_6,
'Translation': 2_6_8_2_0,
'multilingual': 1_2_8_4_0_6,
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
    strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """Construct a CTRL tokenizer, based on Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
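# Hedged illustration of the "@@" continuation convention used by `bpe` above
# (the word is illustrative): a word split into subwords round-trips as
#   tokenize   -> ["adapt@@", "ation"]
#   detokenize -> "adaptation"   (convert_tokens_to_string strips the "@@ " markers)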
| 335 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
A__ = time.time()
locka.acquire(UpperCAmelCase_ )
assert time.time() - _start > timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = """a""" * 1000 + """.lock"""
A__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
locka.acquire(0 )
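# Note: the second test relies on FileLock shortening over-long lock filenames so the
# lock file's basename stays within the usual 255-character filesystem limit, while the
# two locks still resolve to the same underlying file (hence the Timeout).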
| 335 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."} )
UpperCAmelCase = field(
default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."} )
UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} )
UpperCAmelCase = field(default=2, metadata={"help": "Batch size for training."} )
UpperCAmelCase = field(default=2, metadata={"help": "Batch size for evaluation."} )
UpperCAmelCase = field(default=0.1, metadata={"help": "Value of weight decay."} )
UpperCAmelCase = field(
default=1_0_0_0_0, metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
UpperCAmelCase = field(default=2E-4, metadata={"help": "Learning rate fo training."} )
UpperCAmelCase = field(default="cosine", metadata={"help": "Learning rate."} )
UpperCAmelCase = field(
default=7_5_0, metadata={"help": "Number of warmup steps in the learning rate schedule."} )
UpperCAmelCase = field(
default=1_6, metadata={"help": "Number of gradient accumulation steps."} )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
UpperCAmelCase = field(default=5_0_0_0_0, metadata={"help": "Maximum number of training steps."} )
UpperCAmelCase = field(
default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
UpperCAmelCase = field(default=1_0_2_4, metadata={"help": "Sequence lengths used for training."} )
UpperCAmelCase = field(default=1, metadata={"help": "Training seed."} )
UpperCAmelCase = field(
default=1_0_2_4, metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "States path if the training should continue from a checkpoint folder."} )
UpperCAmelCase = field(default=_lowerCamelCase, metadata={"help": "If True the data is pretokenized."} )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} )
UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} )
UpperCAmelCase = field(default=2, metadata={"help": "Batch size used for evaluation."} )
UpperCAmelCase = field(
default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
UpperCAmelCase = field(default=1_0_2_4, metadata={"help": "Length of sequences to be evaluated."} )
UpperCAmelCase = field(default=1, metadata={"help": "Random seed used for evaluation."} )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} )
UpperCAmelCase = field(default=_lowerCamelCase, metadata={"help": "Number of workers used for code evaluation."} )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "Sample from the language model's output distribution."} )
UpperCAmelCase = field(default=0.2, metadata={"help": "Sampling temperature used for generation."} )
UpperCAmelCase = field(default=2_5_6, metadata={"help": "Maximum number of newly generated tokens."} )
UpperCAmelCase = field(default=0, metadata={"help": "Top-k parameter used for generation."} )
UpperCAmelCase = field(default=0.9_5, metadata={"help": "Top-p parameter used for nucleus sampling."} )
UpperCAmelCase = field(default=1_0, metadata={"help": "Number of generations to run in parallel."} )
UpperCAmelCase = field(
default=2_0_0, metadata={"help": "Number of completions to generate for each sample."} )
UpperCAmelCase = field(default=1, metadata={"help": "Random seed used for evaluation."} )
UpperCAmelCase = field(
default="eval_results.json", metadata={"help": "Random seed used for evaluation."} )
UpperCAmelCase = field(
default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
UpperCAmelCase = field(
default=-1, metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
}, )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
}, )
UpperCAmelCase = field(
default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."} )
UpperCAmelCase = field(
default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."} )
UpperCAmelCase = field(
default=1_0_0_0_0_0, metadata={"help": "Number of files to save per JSON output file."} )
UpperCAmelCase = field(default="content", metadata={"help": "Column containing text data to process."} )
UpperCAmelCase = field(
default=1_0_0_0, metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
UpperCAmelCase = field(
default=1_0_0, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
UpperCAmelCase = field(
default=0.2_5, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
UpperCAmelCase = field(
default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
UpperCAmelCase = field(
default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."} )
UpperCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "If True, near-duplicate samples are removed."} )
UpperCAmelCase = field(
default=0.8_5, metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field(
default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."} )
UpperCAmelCase = field(
default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."} )
UpperCAmelCase = field(default="content", metadata={"help": "Column containing text data to process."} )
UpperCAmelCase = field(default=2_0_0_0_0_0, metadata={"help": "Number of examples to train tokenizer on."} )
UpperCAmelCase = field(
default=3_2_7_6_8, metadata={"help": "Vocabulary size of the new tokenizer."} )
UpperCAmelCase = field(default="codeparrot", metadata={"help": "Name of new tokenizer."} )
UpperCAmelCase = field(default=_lowerCamelCase, metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."} )
UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."} )
UpperCAmelCase = field(
default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."} )
UpperCAmelCase = field(default=_lowerCamelCase, metadata={"help": "Number of workers used for pretokenization."} )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field(
default="gpt2-large", metadata={"help": "Configuration to use for model initialization."} )
UpperCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."} )
UpperCAmelCase = field(default="codeparrot", metadata={"help": "Name of the created model."} )
UpperCAmelCase = field(default=_lowerCamelCase, metadata={"help": "Push saved model to the hub."} )
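# Minimal sketch of how these argument dataclasses are typically consumed.
# HfArgumentParser is real transformers API; the rest below is illustrative:
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(a)  # any one of the argument dataclasses above
#   args = parser.parse_args_into_dataclasses()[0]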
| 335 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "dandelin/vilt-b32-finetuned-vqa"
UpperCAmelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
UpperCAmelCase = "image_qa"
UpperCAmelCase = AutoProcessor
UpperCAmelCase = AutoModelForVisualQuestionAnswering
UpperCAmelCase = ["image", "text"]
UpperCAmelCase = ["text"]
def __init__( self: List[str] , *UpperCamelCase: Dict , **UpperCamelCase: List[str] ):
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: "Image" , UpperCamelCase: str ):
"""simple docstring"""
return self.pre_processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
def UpperCamelCase ( self: str , UpperCamelCase: str ):
"""simple docstring"""
with torch.no_grad():
return self.model(**UpperCamelCase ).logits
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
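# A minimal usage sketch (image path is illustrative; assumes torch, PIL and the
# checkpoint above are available). PipelineTool subclasses are called with their
# declared inputs and run encode -> forward -> decode, roughly:
#
#   from PIL import Image
#   tool = a()  # the VQA tool defined above
#   answer = tool(image=Image.open("photo.jpg"), question="What is on the table?")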
| 335 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int]=13 , UpperCamelCase: List[Any]=32 , UpperCamelCase: str=2 , UpperCamelCase: List[str]=3 , UpperCamelCase: Dict=16 , UpperCamelCase: Dict=[1, 2, 1] , UpperCamelCase: List[Any]=[2, 2, 4] , UpperCamelCase: Optional[int]=2 , UpperCamelCase: str=2.0 , UpperCamelCase: Tuple=True , UpperCamelCase: Union[str, Any]=0.0 , UpperCamelCase: List[str]=0.0 , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: int="gelu" , UpperCamelCase: Any=False , UpperCamelCase: str=True , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: str=1e-5 , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=None , UpperCamelCase: Dict=True , UpperCamelCase: Tuple=10 , UpperCamelCase: Any=8 , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = embed_dim
A__ = depths
A__ = num_heads
A__ = window_size
A__ = mlp_ratio
A__ = qkv_bias
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = drop_path_rate
A__ = hidden_act
A__ = use_absolute_embeddings
A__ = patch_norm
A__ = layer_norm_eps
A__ = initializer_range
A__ = is_training
A__ = scope
A__ = use_labels
A__ = type_sequence_label_size
A__ = encoder_stride
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self: Any ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase ( self: Any , UpperCamelCase: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
A__ = SwinvaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
A__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
A__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase ( self: str , UpperCamelCase: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: List[Any] ):
"""simple docstring"""
A__ = SwinvaForMaskedImageModeling(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A__ = 1
A__ = SwinvaForMaskedImageModeling(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase ( self: int , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = SwinvaForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCAmelCase = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = SwinvaModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , embed_dim=37 )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
pass
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
A__ = len(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = config.window_size**2
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
A__ = len(UpperCamelCase )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
A__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
A__ = 2
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: str ):
"""simple docstring"""
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.hidden_states
A__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# Swinv2 has a different seq_length
A__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
A__ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
A__ , A__ , A__ , A__ = reshaped_hidden_states[0].shape
A__ = (
reshaped_hidden_states[0].view(UpperCamelCase , UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
A__ = True
self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
A__ = True
self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , (padded_height, padded_width) )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Any ):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SwinvaModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(UpperCamelCase )
for model_class in self.all_model_classes:
A__ = model_class(config=UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
UpperCamelCase )
A__ = self.default_image_processor
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
A__ = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
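# Shape sanity check behind the attention assertions above: the tester defaults
# to window_size=2 and num_heads=[2, 2, 4], so each per-window attention map
# asserted on has shape
#   (num_heads[0], window_size**2, window_size**2) = (2, 4, 4).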
| 335 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ):
"""simple docstring"""
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ):
"""simple docstring"""
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["""shortest_edge"""] * h / w )
A__ = self.size["""shortest_edge"""]
elif w > h:
A__ = self.size["""shortest_edge"""]
A__ = int(self.size["""shortest_edge"""] * w / h )
else:
A__ = self.size["""shortest_edge"""]
A__ = self.size["""shortest_edge"""]
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
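# Minimal usage sketch of the processor exercised above (image path is
# illustrative):
#
#   processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#   enc = processor(images=Image.open("cat.png"), return_tensors="pt")
#   enc["pixel_values"]  # (1, 3, H, W) after shortest-edge resize and padding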
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : int ):
A__ = u
for i in range(1 , UpperCAmelCase_ ):
A__ = temp * (u - i)
return temp
def _snake_case ( ):
A__ = int(input("""enter the numbers of values: """ ) )
A__ = []
for _ in range(UpperCAmelCase_ ):
y.append([] )
for i in range(UpperCAmelCase_ ):
for j in range(UpperCAmelCase_ ):
y[i].append(UpperCAmelCase_ )
A__ = 0
print("""enter the values of parameters in a list: """ )
A__ = list(map(UpperCAmelCase_ , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(UpperCAmelCase_ ):
A__ = float(input() )
A__ = int(input("""enter the value to interpolate: """ ) )
A__ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCAmelCase_ ):
for j in range(n - i ):
A__ = y[j + 1][i - 1] - y[j][i - 1]
A__ = y[0][0]
for i in range(1 , UpperCAmelCase_ ):
summ += (ucal(UpperCAmelCase_ , UpperCAmelCase_ ) * y[0][i]) / math.factorial(UpperCAmelCase_ )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
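# Worked sketch of the forward-difference scheme the nested loops build (values
# are illustrative, not from the source): for x = [1, 2, 3] and y = [1, 4, 9]
# the first forward differences are [3, 5] and the second is [2], so with
#   u = (1.5 - 1) / (2 - 1) = 0.5
# the interpolated value at 1.5 is 1 + u*3 + u*(u - 1)/2! * 2 = 1 + 1.5 - 0.25 = 2.25.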
| 335 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : Dict ): # noqa: E741
A__ = len(UpperCAmelCase_ )
A__ = 0
A__ = [0] * n
A__ = [False] * n
A__ = [False] * n
def dfs(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
if parent == root:
out_edge_count += 1
A__ = True
A__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
A__ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
A__ = True
# AP found via cycle
if at == low[to]:
A__ = True
else:
A__ = min(low[at] , UpperCAmelCase_ )
return out_edge_count
for i in range(UpperCAmelCase_ ):
if not visited[i]:
A__ = 0
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , -1 , UpperCAmelCase_ )
A__ = out_edge_count > 1
for x in range(len(UpperCAmelCase_ ) ):
if is_art[x] is True:
print(UpperCAmelCase_ )
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ : Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
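# For the adjacency list above the expected articulation points are 2, 3 and 5:
# removing 2 splits off {0, 1}, removing 3 isolates 4, and removing 5 cuts the
# cycle {6, 7, 8} off from the rest of the graph.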
| 335 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE_ : Any = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def _snake_case ( UpperCAmelCase_ : Union[str, Any]=None ):
if subparsers is not None:
A__ = subparsers.add_parser("""tpu-config""" , description=_description )
else:
A__ = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
A__ = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=UpperCAmelCase_ , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=UpperCAmelCase_ , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
A__ = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=UpperCAmelCase_ , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase_ ):
A__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
A__ = defaults.command_file
if not args.command and defaults.commands is not None:
A__ = defaults.commands
if not args.tpu_name:
A__ = defaults.tpu_name
if not args.tpu_zone:
A__ = defaults.tpu_zone
if args.accelerate_version == "dev":
A__ = """git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
A__ = """accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase_ ):
A__ = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
A__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase_ ):
A__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
A__ = ["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
A__ = """; """.join(UpperCAmelCase_ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
A__ = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {' '.join(UpperCAmelCase_ )}""" )
return
subprocess.run(UpperCAmelCase_ )
print("""Successfully setup pod.""" )
def _snake_case ( ):
A__ = tpu_command_parser()
A__ = parser.parse_args()
tpu_command_launcher(UpperCAmelCase_ )
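# Example invocation (TPU name and zone are placeholder values; all flags are
# defined by the parser above):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-b \
#       --command "python train.py" --install_accelerate --debug
#
# With --debug the assembled `gcloud compute tpus tpu-vm ssh ...` command is
# printed instead of executed, which is a cheap way to verify the setup.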
| 335 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
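# The helper under test maps a string name to an nn.Module; a minimal sketch:
#
#   act = get_activation("gelu")
#   y = act(torch.linspace(-3.0, 3.0, steps=7))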
| 335 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: int , *UpperCamelCase: Optional[Any] , **UpperCamelCase: str ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: List[Any]=None ):
"""simple docstring"""
A__ = {}
if top_k is not None:
A__ = top_k
return {}, {}, postprocess_params
def __call__( self: List[str] , UpperCamelCase: Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCamelCase: List[Any] ):
"""simple docstring"""
return super().__call__(UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Tuple , UpperCamelCase: int ):
"""simple docstring"""
A__ = load_image(UpperCamelCase )
A__ = self.image_processor(images=UpperCamelCase , return_tensors=self.framework )
return model_inputs
def UpperCamelCase ( self: str , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = self.model(**UpperCamelCase )
return model_outputs
def UpperCamelCase ( self: List[Any] , UpperCamelCase: List[Any] , UpperCamelCase: Optional[Any]=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
A__ = self.model.config.num_labels
if self.framework == "pt":
A__ = model_outputs.logits.softmax(-1 )[0]
A__ , A__ = probs.topk(UpperCamelCase )
elif self.framework == "tf":
A__ = stable_softmax(model_outputs.logits , axis=-1 )[0]
A__ = tf.math.top_k(UpperCamelCase , k=UpperCamelCase )
A__ , A__ = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
A__ = scores.tolist()
A__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase , UpperCamelCase )]
| 335 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Union[str, Any] , UpperCamelCase: Any ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = RobertaEmbeddings(UpperCamelCase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = config.num_hidden_layers
A__ = DeeRobertaModel(UpperCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: str=None , UpperCamelCase: str=None , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , UpperCamelCase: List[Any]=None , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[int]=-1 , UpperCamelCase: Optional[Any]=False , ):
"""simple docstring"""
A__ = self.num_layers
try:
A__ = self.roberta(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , position_ids=UpperCamelCase , head_mask=UpperCamelCase , inputs_embeds=UpperCamelCase , )
A__ = outputs[1]
A__ = self.dropout(UpperCamelCase )
A__ = self.classifier(UpperCamelCase )
A__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A__ = e.message
A__ = e.exit_layer
A__ = outputs[0]
if not self.training:
A__ = entropy(UpperCamelCase )
A__ = []
A__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
A__ = []
for highway_exit in outputs[-1]:
A__ = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase )
if train_highway:
A__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A__ = (loss,) + outputs
if not self.training:
A__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
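# Note on the eval-time return layout built above (exact contents depend on
# config flags, so treat this as a sketch): with labels provided and no early
# exit, `outputs` is roughly
#   (loss, logits, *extras, (original_entropy, highway_entropy), exit_layer)
# where `exit_layer` is the ramp index raised via HighwayException, or the
# final layer when no ramp exited early.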
| 335 | 1 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
_enforce_args(UpperCAmelCase_ , UpperCAmelCase_ )
if n == 0:
return 0
A__ = float("""-inf""" )
for i in range(1 , n + 1 ):
A__ = max(
UpperCAmelCase_ , prices[i - 1] + naive_cut_rod_recursive(n - i , UpperCAmelCase_ ) )
return max_revenue
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
_enforce_args(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : list , UpperCAmelCase_ : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
A__ = float("""-inf""" )
for i in range(1 , n + 1 ):
A__ = max(
UpperCAmelCase_ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , UpperCAmelCase_ , UpperCAmelCase_ ) , )
A__ = max_revenue
return max_rev[n]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
_enforce_args(UpperCAmelCase_ , UpperCAmelCase_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
A__ = [float("""-inf""" ) for _ in range(n + 1 )]
A__ = 0
for i in range(1 , n + 1 ):
A__ = max_rev[i]
for j in range(1 , i + 1 ):
A__ = max(UpperCAmelCase_ , prices[j - 1] + max_rev[i - j] )
A__ = max_revenue_i
return max_rev[n]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
if n < 0:
A__ = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(UpperCAmelCase_ )
if n > len(UpperCAmelCase_ ):
A__ = (
"""Each integral piece of rod must have a corresponding price. """
F"""Got n = {n} but length of prices = {len(UpperCAmelCase_ )}"""
)
raise ValueError(UpperCAmelCase_ )
def _snake_case ( ):
A__ = [6, 10, 12, 15, 20, 23]
A__ = len(UpperCAmelCase_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
A__ = 36
A__ = top_down_cut_rod(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = bottom_up_cut_rod(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = naive_cut_rod_recursive(UpperCAmelCase_ , UpperCAmelCase_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
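# Worked check (prices are illustrative): with prices = [1, 5, 8, 9] and n = 4
# the optimum is two pieces of length 2 (5 + 5 = 10), which all three
# implementations above should agree on.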
| 335 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
SCREAMING_SNAKE_CASE_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _snake_case ( ):
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _snake_case ( ):
A__ = """rougeLsum"""
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _snake_case ( ):
A__ = ["""rouge1""", """rouge2""", """rougeL"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
assert score_sep == score_no_sep
def _snake_case ( ):
A__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ )
def _snake_case ( ):
A__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase_ )["""rougeLsum"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _snake_case ( ):
A__ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A__ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
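# Minimal sketch of the helper these tests exercise:
#
#   scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge2", "rougeL"])
#   # bootstrap_aggregation=True (default) -> dict of aggregate scores;
#   # bootstrap_aggregation=False -> defaultdict of per-example scores,
#   # which is what the isinstance checks above rely on.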
| 335 | 1 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Tuple = False, False, False
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = None
# Automatically constructed
UpperCAmelCase = "dict"
UpperCAmelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCAmelCase = field(default="Audio", init=_lowerCamelCase, repr=_lowerCamelCase )
def __call__( self: str ):
"""simple docstring"""
return self.pa_type
def UpperCamelCase ( self: Dict , UpperCamelCase: Union[str, bytes, dict] ):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(UpperCamelCase , UpperCamelCase ):
return {"bytes": None, "path": value}
elif isinstance(UpperCamelCase , UpperCamelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
A__ = BytesIO()
sf.write(UpperCamelCase , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
                    # At least, if you want to convert PCM bytes to WAV bytes, you have to know the sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
                    # If we already have the PCM bytes, we don't have to read the file again (just use them!)
A__ = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
A__ = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 3_27_67
A__ = BytesIO(bytes() )
sf.write(UpperCamelCase , UpperCamelCase , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def UpperCamelCase ( self: str , UpperCamelCase: dict , UpperCamelCase: Optional[Dict[str, Union[str, bool, None]]] = None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
A__ , A__ = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
A__ = xsplitext(UpperCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
A__ = token_per_repo_id or {}
A__ = path.split("""::""" )[-1]
try:
A__ = string_to_dict(UpperCamelCase , config.HUB_DATASETS_URL )["""repo_id"""]
A__ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
A__ = None
with xopen(UpperCamelCase , """rb""" , use_auth_token=UpperCamelCase ) as f:
A__ , A__ = sf.read(UpperCamelCase )
else:
A__ , A__ = sf.read(UpperCamelCase )
A__ = array.T
if self.mono:
A__ = librosa.to_mono(UpperCamelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
A__ = librosa.resample(UpperCamelCase , orig_sr=UpperCamelCase , target_sr=self.sampling_rate )
A__ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def UpperCamelCase ( self: Tuple , UpperCamelCase: Union[pa.StringArray, pa.StructArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
A__ = pa.array([None] * len(UpperCamelCase ) , type=pa.binary() )
A__ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A__ = pa.array([None] * len(UpperCamelCase ) , type=pa.string() )
A__ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
A__ = pa.array([Audio().encode_example(UpperCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
A__ = storage.field("""bytes""" )
else:
A__ = pa.array([None] * len(UpperCamelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
A__ = storage.field("""path""" )
else:
A__ = pa.array([None] * len(UpperCamelCase ) , type=pa.string() )
A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(UpperCamelCase , self.pa_type )
def UpperCamelCase ( self: int , UpperCamelCase: pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(UpperCamelCase: Optional[Any] ):
with xopen(UpperCamelCase , """rb""" ) as f:
A__ = f.read()
return bytes_
A__ = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A__ = pa.array(
[os.path.basename(UpperCamelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(UpperCamelCase , self.pa_type )
| 335 |
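A hedged usage sketch for the Audio feature above, whose obfuscated methods correspond to encode_example and decode_example in the datasets library: encode an in-memory array to WAV bytes, then decode it back (encoding needs soundfile; decoding also needs librosa).

import numpy as np
from datasets import Audio

feature = Audio(sampling_rate=16_000)
encoded = feature.encode_example({"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000})
# encoded is {"bytes": <wav bytes>, "path": None}
decoded = feature.decode_example(encoded)
assert decoded["sampling_rate"] == 16_000 and decoded["array"].shape == (16_000,)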
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None ):
A__ = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = model.mobilenet_va
else:
A__ = model
A__ = """MobilenetV1/Conv2d_0/"""
A__ = backbone.conv_stem.convolution.weight
A__ = backbone.conv_stem.normalization.bias
A__ = backbone.conv_stem.normalization.weight
A__ = backbone.conv_stem.normalization.running_mean
A__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A__ = i + 1
A__ = i * 2
A__ = backbone.layer[pt_index]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
A__ = backbone.layer[pt_index + 1]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
A__ = model.classifier.weight
A__ = model.classifier.bias
return tf_to_pt_map
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
# Build TF to PyTorch weights loading map
A__ = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
A__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
A__ = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
A__ = array.squeeze().transpose()
else:
A__ = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
A__ = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp""" , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp_1""" , UpperCAmelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , UpperCAmelCase_ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def _snake_case ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : nn.Convad ):
A__ , A__ = features.shape[-2:]
A__ , A__ = conv_layer.stride
A__ , A__ = conv_layer.kernel_size
if in_height % stride_height == 0:
A__ = max(kernel_height - stride_height , 0 )
else:
A__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
A__ = max(kernel_width - stride_width , 0 )
else:
A__ = max(kernel_width - (in_width % stride_width) , 0 )
A__ = pad_along_width // 2
A__ = pad_along_width - pad_left
A__ = pad_along_height // 2
A__ = pad_along_height - pad_top
A__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , """constant""" , 0.0 )
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
A__ = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode="""zeros""" , )
if use_normalization:
A__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
A__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
A__ = ACTaFN[config.hidden_act]
else:
A__ = config.hidden_act
else:
A__ = None
def UpperCamelCase ( self: List[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
A__ = apply_tf_padding(UpperCamelCase , self.convolution )
A__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
A__ = self.normalization(UpperCamelCase )
if self.activation is not None:
A__ = self.activation(UpperCamelCase )
return features
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = MobileNetVaConfig
UpperCAmelCase = load_tf_weights_in_mobilenet_va
UpperCAmelCase = "mobilenet_v1"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = False
def UpperCamelCase ( self: Any , UpperCamelCase: Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
| 335 | 1 |
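A self-contained sketch of the TensorFlow-style "SAME" padding computed by the apply_tf_padding helper above: pad so that the convolution output size is ceil(input_size / stride). Plain PyTorch; the function name is illustrative.

import torch
import torch.nn.functional as F

def tf_same_pad(features: torch.Tensor, kernel: int, stride: int) -> torch.Tensor:
    in_height, in_width = features.shape[-2:]
    pad_h = max(kernel - (in_height % stride or stride), 0)
    pad_w = max(kernel - (in_width % stride or stride), 0)
    top, left = pad_h // 2, pad_w // 2
    # F.pad takes (left, right, top, bottom) for the last two dimensions
    return F.pad(features, (left, pad_w - left, top, pad_h - top))

x = torch.randn(1, 3, 224, 224)
y = F.conv2d(tf_same_pad(x, kernel=3, stride=2), torch.randn(8, 3, 3, 3), stride=2)
assert y.shape[-2:] == (112, 112)  # ceil(224 / 2)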
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = BioGptTokenizer
UpperCAmelCase = False
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(UpperCamelCase ) )
def UpperCamelCase ( self: int , UpperCamelCase: Any ):
"""simple docstring"""
A__ = """lower newer"""
A__ = """lower newer"""
return input_text, output_text
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = BioGptTokenizer(self.vocab_file , self.merges_file )
A__ = """lower"""
A__ = ["""low""", """er</w>"""]
A__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
A__ = tokens + ["""<unk>"""]
A__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
@slow
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase )
A__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase )
A__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
A__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 335 |
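A hedged sketch of the greedy BPE merge loop the tokenizer test above relies on, using the toy merge table from setUp ("l o", "lo w", "e r</w>"). It illustrates the algorithm only, not the BioGptTokenizer implementation itself.

def bpe(word: str, merge_ranks: dict) -> list:
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda pair: merge_ranks.get(pair, float("inf")))
        if best not in merge_ranks:
            break  # no applicable merge left
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}
assert bpe("lower", ranks) == ["low", "er</w>"]  # the tokens the test expects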
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : set ):
A__ , A__ = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 | 1 |
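A usage sketch for the backtracking path counter above, assuming the function under its original name depth_first_search (which its recursive calls still reference): count simple paths from the top-left to the bottom-right, treating 1-cells as walls. With the centre blocked, the only two simple paths are the two arcs of the outer ring.

maze = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
assert depth_first_search(maze, 0, 0, set()) == 2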
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( UpperCAmelCase_ : Optional[int] ):
A__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = StableDiffusionLatentUpscalePipeline
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase = frozenset([] )
UpperCAmelCase = True
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = 1
A__ = 4
A__ = (16, 16)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=UpperCamelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"""KDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
) , in_channels=8 , mid_block_type=UpperCamelCase , only_cross_attention=UpperCamelCase , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , )
A__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
A__ = EulerDiscreteScheduler(prediction_type="""sample""" )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""quick_gelu""" , projection_dim=5_12 , )
A__ = CLIPTextModel(UpperCamelCase )
A__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ = {
"""unet""": model.eval(),
"""vae""": vae.eval(),
"""scheduler""": scheduler,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase ( self: str , UpperCamelCase: str , UpperCamelCase: List[str]=0 ):
"""simple docstring"""
if str(UpperCamelCase ).startswith("""mps""" ):
A__ = torch.manual_seed(UpperCamelCase )
else:
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
A__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": self.dummy_image.cpu(),
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu"""
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = self.get_dummy_inputs(UpperCamelCase )
A__ = pipe(**UpperCamelCase ).images
A__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
A__ = np.array(
[0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] )
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase , 1e-3 )
def UpperCamelCase ( self: str ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def UpperCamelCase ( self: int ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3e-3 )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = [
"""DDIMScheduler""",
"""DDPMScheduler""",
"""PNDMScheduler""",
"""HeunDiscreteScheduler""",
"""EulerAncestralDiscreteScheduler""",
"""KDPM2DiscreteScheduler""",
"""KDPM2AncestralDiscreteScheduler""",
"""DPMSolverSDEScheduler""",
]
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCamelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = self.get_dummy_inputs(UpperCamelCase )
A__ = 2
A__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers that are incompatible with this pipeline's sigma schedule are skipped
continue
A__ = getattr(UpperCamelCase , scheduler_enum.name )
A__ = scheduler_cls.from_config(pipe.scheduler.config )
A__ = pipe(**UpperCamelCase )[0]
outputs.append(UpperCamelCase )
assert check_same_shape(UpperCamelCase )
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = torch.manual_seed(33 )
A__ = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
A__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
A__ = """a photo of an astronaut high resolution, unreal engine, ultra realistic"""
A__ = pipe(UpperCamelCase , generator=UpperCamelCase , output_type="""latent""" ).images
A__ = upscaler(
prompt=UpperCamelCase , image=UpperCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase , output_type="""np""" , ).images[0]
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = torch.manual_seed(33 )
A__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
A__ = """the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" )
A__ = upscaler(
prompt=UpperCamelCase , image=UpperCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase , output_type="""np""" , ).images[0]
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-2
| 335 |
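The two-stage flow the slow test above exercises, as a standalone sketch: the base pipeline emits latents (output_type="latent"), which the x2 latent upscaler consumes directly. Checkpoints are the ones used in the test; this needs a CUDA GPU.

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline

base = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")
prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
latents = base(prompt, output_type="latent").images
image = upscaler(prompt=prompt, image=latents, num_inference_steps=20, guidance_scale=0).images[0]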
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : int = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 1 |
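A minimal sketch of the lazy-import pattern the module above (and the similar ones below) follows: attribute access triggers the real submodule import on first use. This mirrors the idea behind transformers' _LazyModule, not its exact code.

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
        self.__all__ = list(self._class_to_module)

    def __getattr__(self, attr: str):
        module_name = self._class_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + module_name, self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value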
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 335 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ):
A__ = _distribute_shards(**UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
A__ = _split_gen_kwargs(UpperCAmelCase_ , UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ):
if expected is RuntimeError:
with pytest.raises(UpperCAmelCase_ ):
_number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
else:
A__ = _number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
assert out == expected
| 335 | 1 |
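One implementation that satisfies every parametrized expectation in the first test above, as a sketch (the actual datasets implementation may differ in detail): split num_shards into at most max_num_jobs contiguous ranges, giving the remainder to the earliest jobs.

def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    if num_shards == 0:
        return []
    num_jobs = min(num_shards, max_num_jobs)
    base, remainder = divmod(num_shards, num_jobs)
    out, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < remainder else 0)
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]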
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : List[str] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : str = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE_ : str = parse(importlib.metadata.version('torch'))
def _snake_case ( UpperCAmelCase_ : Union[str, Version] , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
A__ = STR_OPERATION_TO_FUNC[operation]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = parse(importlib.metadata.version(UpperCAmelCase_ ) )
return operation(UpperCAmelCase_ , parse(UpperCAmelCase_ ) )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return compare_versions(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
| 335 | 1 |
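A self-contained sketch of the version-comparison helpers above, with an explicit operator table standing in for the imported STR_OPERATION_TO_FUNC (the table here is an assumption):

import importlib.metadata
import operator
from packaging.version import parse

STR_OPERATION_TO_FUNC = {
    "<": operator.lt, "<=": operator.le, "==": operator.eq,
    "!=": operator.ne, ">=": operator.ge, ">": operator.gt,
}

def compare_versions(library_or_version, operation: str, requirement_version: str) -> bool:
    op = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return op(library_or_version, parse(requirement_version))

print(compare_versions("packaging", ">=", "20.0"))  # e.g. gate a feature on a dependency version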
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : int = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: int = CHRF.CHAR_ORDER , UpperCamelCase: int = CHRF.WORD_ORDER , UpperCamelCase: int = CHRF.BETA , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , ):
"""simple docstring"""
        A__ = len(references[0] )  # number of references per prediction
        if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        # transpose the references: sacrebleu expects one stream per reference position
        A__ = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
A__ = CHRF(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = sb_chrf.corpus_score(UpperCamelCase , UpperCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
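# A minimal standalone sketch of the reference transposition performed above:
# sacrebleu's corpus_score wants one stream per reference *position*, not one
# list of references per prediction (assumes sacrebleu>=1.4.12 is installed).
from sacrebleu import CHRF

predictions = ["hello there", "general kenobi"]
references = [["hello there"], ["general kenobi"]]  # one reference per prediction
transposed = [[refs[i] for refs in references] for i in range(len(references[0]))]
print(CHRF().corpus_score(predictions, transposed).score)  # 100.0: exact matches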
| 335 | 1 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ : Optional[int] = TypeVar('T')
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self: Dict , UpperCamelCase: bool = True ):
"""simple docstring"""
A__ = {} # dictionary of lists
A__ = directed
def UpperCamelCase ( self: List[Any] , UpperCamelCase: T , UpperCamelCase: T ):
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCamelCase )
self.adj_list[destination_vertex].append(UpperCamelCase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCamelCase )
A__ = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(UpperCamelCase )
A__ = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
A__ = [destination_vertex]
A__ = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCamelCase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCamelCase )
A__ = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
A__ = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
A__ = [destination_vertex]
A__ = []
return self
def __repr__( self: str ):
"""simple docstring"""
return pformat(self.adj_list )
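# A readable sketch of the same structure with explicit names (the listing
# above obfuscates its identifiers): an adjacency list kept in a dict,
# undirected unless `directed=True`.
from collections import defaultdict

def build_adjacency_list(edges, directed=False):
    adj = defaultdict(list)
    for source, destination in edges:
        adj[source].append(destination)
        if not directed:
            adj[destination].append(source)
    return dict(adj)

print(build_adjacency_list([(1, 2), (2, 3), (1, 3)]))
# {1: [2, 3], 2: [1, 3], 3: [2, 1]}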
| 335 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , *UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple=None , UpperCamelCase: Tuple=None , **UpperCamelCase: Dict ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
A__ = eval_examples
A__ = post_process_function
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: List[Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: Optional[int] , ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Dict=None , UpperCamelCase: str = "test" , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
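# Hedged usage sketch: this subclass follows the Seq2SeqTrainer pattern from
# the transformers question-answering examples, where `post_process_function`
# turns raw generations into final predictions before `compute_metrics`
# scores them. All names below are illustrative, not taken from this file.
#
# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,  # raw examples kept around for post-processing
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate(max_length=64, num_beams=4)  # forwarded as gen_kwargs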
| 335 | 1 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int = 50 ):
    # count tilings of a row of `length` unit cells using single cells plus
    # tiles of length 2, 3 and 4; start with the single all-unit-cell filling
    A__ = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):  # tile lengths 2, 3 and 4
            for tile_start in range(row_length - tile_length + 1 ):
                # fix the position of the first long tile, then reuse the
                # already-computed count for the cells that remain after it
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 335 |
"""simple docstring"""
class a :
"""simple docstring"""
def __init__( self: Dict ):
"""simple docstring"""
A__ = {}
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase , """ -> """ , """ -> """.join([str(UpperCamelCase ) for j in self.vertex[i]] ) )
def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase )
else:
# else make a new vertex
A__ = [to_vertex]
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: list ):
"""simple docstring"""
A__ = True
print(UpperCamelCase , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
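    # The same traversal with explicit names, as a sketch (the obfuscated
    # arguments of the recursive call above are the current vertex and the
    # shared visited state):
    def dfs_from(graph, start, visited=None):
        if visited is None:
            visited = set()
        visited.add(start)
        print(start, end=" ")
        for neighbour in graph.get(start, []):
            if neighbour not in visited:
                dfs_from(graph, neighbour, visited)

    dfs_from({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0)  # prints: 0 1 2 3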
| 335 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE_ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : List[Any] = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "imagegpt"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self: Optional[Any] , UpperCamelCase: Optional[int]=5_12 + 1 , UpperCamelCase: Union[str, Any]=32 * 32 , UpperCamelCase: List[Any]=5_12 , UpperCamelCase: str=24 , UpperCamelCase: Tuple=8 , UpperCamelCase: Optional[Any]=None , UpperCamelCase: Union[str, Any]="quick_gelu" , UpperCamelCase: Dict=0.1 , UpperCamelCase: Dict=0.1 , UpperCamelCase: Tuple=0.1 , UpperCamelCase: str=1e-5 , UpperCamelCase: Tuple=0.02 , UpperCamelCase: Any=True , UpperCamelCase: Optional[int]=True , UpperCamelCase: List[Any]=False , UpperCamelCase: List[Any]=False , UpperCamelCase: Optional[Any]=False , **UpperCamelCase: List[str] , ):
"""simple docstring"""
A__ = vocab_size
A__ = n_positions
A__ = n_embd
A__ = n_layer
A__ = n_head
A__ = n_inner
A__ = activation_function
A__ = resid_pdrop
A__ = embd_pdrop
A__ = attn_pdrop
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = scale_attn_weights
A__ = use_cache
A__ = scale_attn_by_inverse_layer_idx
A__ = reorder_and_upcast_attn
A__ = tie_word_embeddings
super().__init__(tie_word_embeddings=UpperCamelCase , **UpperCamelCase )
class a ( _lowerCamelCase ):
"""simple docstring"""
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def UpperCamelCase ( self: List[str] , UpperCamelCase: "FeatureExtractionMixin" , UpperCamelCase: int = 1 , UpperCamelCase: int = -1 , UpperCamelCase: bool = False , UpperCamelCase: Optional["TensorType"] = None , UpperCamelCase: int = 3 , UpperCamelCase: int = 32 , UpperCamelCase: int = 32 , ):
"""simple docstring"""
A__ = self._generate_dummy_images(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = dict(preprocessor(images=UpperCamelCase , return_tensors=UpperCamelCase ) )
return inputs
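# Brief sketch (real transformers names assumed: ImageGPTConfig): thanks to
# the attribute_map above, GPT-2 style aliases resolve to the native fields.
# from transformers import ImageGPTConfig
# config = ImageGPTConfig(n_embd=512, n_layer=24, n_head=8)
# assert config.hidden_size == config.n_embd == 512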
| 335 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int = 10 ):
    if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or n < 0:
        raise ValueError("""Invalid input: n must be a non-negative integer""" )
    A__ = 10**n  # modulus that keeps only the last n digits
    A__ = 2_8433 * (pow(2 , 783_0457 , UpperCAmelCase_ )) + 1  # 28433 * 2**7830457 + 1, reduced mod 10**n
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 335 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "swinv2"
UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: Tuple , UpperCamelCase: int=2_24 , UpperCamelCase: Any=4 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=96 , UpperCamelCase: Union[str, Any]=[2, 2, 6, 2] , UpperCamelCase: Any=[3, 6, 12, 24] , UpperCamelCase: int=7 , UpperCamelCase: Dict=4.0 , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[str]=0.0 , UpperCamelCase: List[str]=0.0 , UpperCamelCase: List[str]=0.1 , UpperCamelCase: Any="gelu" , UpperCamelCase: List[Any]=False , UpperCamelCase: Any=0.02 , UpperCamelCase: Tuple=1e-5 , UpperCamelCase: str=32 , **UpperCamelCase: Dict , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = embed_dim
A__ = depths
A__ = len(UpperCamelCase )
A__ = num_heads
A__ = window_size
A__ = mlp_ratio
A__ = qkv_bias
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = drop_path_rate
A__ = hidden_act
A__ = use_absolute_embeddings
A__ = layer_norm_eps
A__ = initializer_range
A__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A__ = int(embed_dim * 2 ** (len(UpperCamelCase ) - 1) )
A__ = (0, 0, 0, 0)
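# Sketch (real name assumed: Swinv2Config): the derived hidden size computed
# above is embed_dim * 2 ** (num_stages - 1), e.g. 96 * 2**3 = 768 for the
# default four-stage depths [2, 2, 6, 2].
# from transformers import Swinv2Config
# assert Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2]).hidden_size == 768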
| 335 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]  # Pointad: a point in 3-D space
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]  # Vectorad: a 3-D vector
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
    return tuple(round(x , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)  # round each component before the zero test
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
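# A self-contained sketch of the collinearity test above: three 3-D points
# are collinear exactly when the cross product of AB and AC is zero.
def are_collinear(a, b, c, accuracy=10):
    ab = tuple(b[i] - a[i] for i in range(3))
    ac = tuple(c[i] - a[i] for i in range(3))
    cross = (
        ab[1] * ac[2] - ab[2] * ac[1],
        ab[2] * ac[0] - ab[0] * ac[2],
        ab[0] * ac[1] - ab[1] * ac[0],
    )
    return all(round(coord, accuracy) == 0 for coord in cross)

assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))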
| 335 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
SCREAMING_SNAKE_CASE_ : List[str] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def _snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any=None ):
if rng is None:
A__ = random.Random()
A__ = 1
for dim in shape:
total_dims *= dim
A__ = []
for _ in range(UpperCAmelCase_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
A__ = np.array(UpperCAmelCase_ , dtype=jnp.intaa ).reshape(UpperCAmelCase_ )
return output
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple=None ):
A__ = ids_tensor(UpperCAmelCase_ , vocab_size=2 , rng=UpperCAmelCase_ )
# make sure that at least one token is attended to for each batch
A__ = 1
return attn_mask
@require_flax
class a :
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = ()
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
A__ = 2
A__ = inputs["""input_ids"""].shape[-1] // 2
A__ = inputs["""input_ids"""][:max_batch_size, :sequence_length]
A__ = jnp.ones_like(UpperCamelCase )
A__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
A__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
A__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
A__ = False
A__ = max_length
A__ = 0
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ = getattr(UpperCamelCase , UpperCamelCase )
A__ = pt_model_class(UpperCamelCase ).eval()
A__ = load_flax_weights_in_pytorch_model(UpperCamelCase , flax_model.params )
A__ = flax_model.generate(UpperCamelCase ).sequences
A__ = pt_model.generate(torch.tensor(UpperCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
A__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
A__ = False
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCamelCase )
A__ = jit(model.generate )
A__ = jit_generate(UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
A__ = True
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCamelCase )
A__ = jit(model.generate )
A__ = jit_generate(UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
A__ = False
A__ = max_length
A__ = 2
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCamelCase )
A__ = jit(model.generate )
A__ = jit_generate(UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
A__ = False
A__ = max_length
A__ = 2
A__ = 2
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
A__ = True
A__ = max_length
A__ = 0.8
A__ = 10
A__ = 0.3
A__ = 1
A__ = 8
A__ = 9
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCamelCase )
A__ = jit(model.generate )
A__ = jit_generate(UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
A__ = max_length
A__ = 1
A__ = 8
A__ = 9
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCamelCase )
A__ = jit(model.generate )
A__ = jit_generate(UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
A__ = max_length
A__ = 2
A__ = 1
A__ = 8
A__ = 9
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCamelCase )
A__ = jit(model.generate )
A__ = jit_generate(UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
# pad attention mask on the left
A__ = attention_mask.at[(0, 0)].set(0 )
A__ = False
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase , attention_mask=UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCamelCase )
A__ = jit(model.generate )
A__ = jit_generate(UpperCamelCase , attention_mask=UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
# pad attention mask on the left
A__ = attention_mask.at[(0, 0)].set(0 )
A__ = True
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase , attention_mask=UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCamelCase )
A__ = jit(model.generate )
A__ = jit_generate(UpperCamelCase , attention_mask=UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ , A__ , A__ , A__ = self._get_input_ids_and_config()
# pad attention mask on the left
A__ = attention_mask.at[(0, 0)].set(0 )
A__ = 2
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(UpperCamelCase )
A__ = model.generate(UpperCamelCase , attention_mask=UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCamelCase )
A__ = jit(model.generate )
A__ = jit_generate(UpperCamelCase , attention_mask=UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
A__ = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A__ = """Hello world"""
A__ = tokenizer(UpperCamelCase , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(UpperCamelCase , """do_samples""" ):
model.generate(UpperCamelCase , do_samples=UpperCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(UpperCamelCase , """foo""" ):
A__ = {"""foo""": """bar"""}
model.generate(UpperCamelCase , **UpperCamelCase )
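# The recurring pattern these tests exercise, as a hedged sketch: wrapping
# `model.generate` in jax.jit must not change its output (assumes a Flax
# causal LM checkpoint such as the tiny test models used above).
# import jax
# from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
# input_ids = tokenizer("Hello", return_tensors="np").input_ids
# eager = model.generate(input_ids).sequences
# jitted = jax.jit(model.generate)(input_ids).sequences
# assert eager.tolist() == jitted.tolist()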
| 335 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
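# Usage sketch: the _LazyModule indirection above defers the torch-heavy
# modeling import until first attribute access, so
#     from transformers import PegasusXConfig
# stays cheap when the model classes are never touched.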
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
A__ = []
create_all_state(1 , UpperCAmelCase_ , UpperCAmelCase_ , [] , UpperCAmelCase_ )
return result
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[list[int]] , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(UpperCAmelCase_ , total_number - level + 2 ):
current_list.append(UpperCAmelCase_ )
create_all_state(i + 1 , UpperCAmelCase_ , level - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
current_list.pop()
def _snake_case ( UpperCAmelCase_ : list[list[int]] ):
for i in total_list:
print(*UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 4
SCREAMING_SNAKE_CASE_ : int = 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = generate_all_combinations(n, k)
print_all_state(total_list)
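    # Sanity sketch (assuming the first function above is
    # `generate_all_combinations`, as this __main__ block indicates): the
    # backtracking enumerates k-combinations of 1..n in lexicographic order,
    # matching itertools.
    # from itertools import combinations
    # assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]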
| 335 |
"""simple docstring"""
import math
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
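    # A compact reference sketch with explicit names; note that a complete
    # Floyd-Warshall also seeds dist[i][i] = 0, which the obfuscated listing
    # above does not show.
    def floyd_warshall(weights):  # weights: n x n matrix, math.inf = no edge
        n = len(weights)
        dist = [row[:] for row in weights]
        for i in range(n):
            dist[i][i] = 0
        for k in range(n):
            for i in range(n):
                for j in range(n):
                    dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
        return dist

    INF = math.inf
    print(floyd_warshall([[0, 3, INF], [INF, 0, 1], [2, INF, 0]])[0][2])  # 4, via 0 -> 1 -> 2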
| 335 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
SCREAMING_SNAKE_CASE_ : Dict = logging.getLogger(__name__)
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ):
# save results
if os.path.exists(UpperCAmelCase_ ):
if os.path.exists(os.path.join(UpperCAmelCase_ , """config.json""" ) ) and os.path.isfile(
os.path.join(UpperCAmelCase_ , """config.json""" ) ):
os.remove(os.path.join(UpperCAmelCase_ , """config.json""" ) )
if os.path.exists(os.path.join(UpperCAmelCase_ , """pytorch_model.bin""" ) ) and os.path.isfile(
os.path.join(UpperCAmelCase_ , """pytorch_model.bin""" ) ):
os.remove(os.path.join(UpperCAmelCase_ , """pytorch_model.bin""" ) )
else:
os.makedirs(UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any]=False ):
A__ = 2
if unlogit:
A__ = torch.pow(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = p * torch.log(UpperCAmelCase_ )
A__ = 0
return -plogp.sum(dim=-1 )
def _snake_case ( UpperCAmelCase_ : str ):
logger.info("""lv, h >\t""" + """\t""".join(F"""{x + 1}""" for x in range(len(UpperCAmelCase_ ) ) ) )
for row in range(len(UpperCAmelCase_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + """\t""".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + """\t""".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Optional[Any]=False ):
A__ , A__ = model.config.num_hidden_layers, model.config.num_attention_heads
A__ = torch.zeros(UpperCAmelCase_ , UpperCAmelCase_ ).to(args.device )
A__ = torch.zeros(UpperCAmelCase_ , UpperCAmelCase_ ).to(args.device )
if head_mask is None:
A__ = torch.ones(UpperCAmelCase_ , UpperCAmelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCAmelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
A__ = None
A__ = 0.0
A__ = 0.0
for step, inputs in enumerate(tqdm(UpperCAmelCase_ , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
A__ = tuple(t.to(args.device ) for t in inputs )
((A__) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
A__ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
A__ , A__ , A__ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCAmelCase_ ):
A__ = entropy(attn.detach() , UpperCAmelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCAmelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
A__ = 2
A__ = torch.pow(torch.pow(UpperCAmelCase_ , UpperCAmelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
A__ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""" )
print_ad_tensor(UpperCAmelCase_ )
if compute_importance:
logger.info("""Head importance scores""" )
print_ad_tensor(UpperCAmelCase_ )
logger.info("""Head ranked by importance scores""" )
A__ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
A__ = torch.arange(
head_importance.numel() , device=args.device )
A__ = head_ranks.view_as(UpperCAmelCase_ )
print_ad_tensor(UpperCAmelCase_ )
return attn_entropy, head_importance, total_loss
def _snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] ):
A__ , A__ , A__ = compute_heads_importance(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , compute_entropy=UpperCAmelCase_ )
A__ = 1 / loss # instead of downsteam score use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , UpperCAmelCase_ , original_score * args.masking_threshold )
A__ = torch.ones_like(UpperCAmelCase_ )
A__ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
A__ = original_score
while current_score >= original_score * args.masking_threshold:
A__ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
A__ = float("""Inf""" )
A__ = head_importance.view(-1 ).sort()[1]
if len(UpperCAmelCase_ ) <= num_to_mask:
print("""BREAK BY num_to_mask""" )
break
# mask heads
A__ = current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
A__ = new_head_mask.view(-1 )
A__ = 0.0
A__ = new_head_mask.view_as(UpperCAmelCase_ )
A__ = new_head_mask.clone().detach()
print_ad_tensor(UpperCAmelCase_ )
# Compute metric and head importance again
A__ , A__ , A__ = compute_heads_importance(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , compute_entropy=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
A__ = 1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , UpperCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("""Final head mask""" )
print_ad_tensor(UpperCAmelCase_ )
np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
return head_mask
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple ):
A__ = datetime.now()
A__ , A__ , A__ = compute_heads_importance(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , compute_entropy=UpperCAmelCase_ , compute_importance=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
A__ = 1 / loss
A__ = datetime.now() - before_time
A__ = sum(p.numel() for p in model.parameters() )
A__ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCAmelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = [
v,
]
assert sum(len(UpperCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCAmelCase_ )
A__ = sum(p.numel() for p in model.parameters() )
A__ = datetime.now()
A__ , A__ , A__ = compute_heads_importance(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , compute_entropy=UpperCAmelCase_ , compute_importance=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , actually_pruned=UpperCAmelCase_ , )
A__ = 1 / loss
A__ = datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , UpperCAmelCase_ , UpperCAmelCase_ , pruned_num_params / original_num_params * 100 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , UpperCAmelCase_ , UpperCAmelCase_ )
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 )
save_model(UpperCAmelCase_ , args.output_dir )
def _snake_case ( ):
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=UpperCAmelCase_ , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=UpperCAmelCase_ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=UpperCAmelCase_ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" )
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=UpperCAmelCase_ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=UpperCAmelCase_ , help="""Amount to heads to masking at each masking step.""" )
parser.add_argument("""--metric_name""" , default="""acc""" , type=UpperCAmelCase_ , help="""Metric to use for head masking.""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=UpperCAmelCase_ , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=UpperCAmelCase_ , help="""Batch size.""" )
parser.add_argument("""--seed""" , type=UpperCAmelCase_ , default=42 )
parser.add_argument("""--local_rank""" , type=UpperCAmelCase_ , default=-1 , help="""local_rank for distributed training on gpus""" )
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
parser.add_argument("""--server_ip""" , type=UpperCAmelCase_ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=UpperCAmelCase_ , default="""""" , help="""Can be used for distant debugging.""" )
A__ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
A__ = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
A__ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
A__ = torch.device("""cuda""" , args.local_rank )
A__ = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
A__ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
A__ = nn.parallel.DistributedDataParallel(
UpperCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCAmelCase_ )
elif args.n_gpu > 1:
A__ = nn.DataParallel(UpperCAmelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCAmelCase_ )
torch.save(UpperCAmelCase_ , os.path.join(args.output_dir , """run_args.bin""" ) )
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase_ )
# Prepare dataset
A__ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
A__ = (torch.from_numpy(UpperCAmelCase_ ),)
A__ = TensorDataset(*UpperCAmelCase_ )
A__ = RandomSampler(UpperCAmelCase_ )
A__ = DataLoader(UpperCAmelCase_ , sampler=UpperCAmelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
A__ = mask_heads(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
prune_heads(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
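    # A sketch of the attention-entropy helper defined above: per-row Shannon
    # entropy of an attention distribution, using the 0 * log 0 = 0 convention
    # (the bare `A__ = 0` line in that helper is this masking step).
    def attention_entropy(p):
        plogp = p * torch.log(p)  # nan where p == 0, since 0 * -inf is nan
        plogp[p == 0] = 0         # convention: 0 * log 0 = 0
        return -plogp.sum(dim=-1)

    attn = torch.tensor([[0.5, 0.5, 0.0], [1.0, 0.0, 0.0]])
    print(attention_entropy(attn))  # tensor([0.6931, 0.0000]): ln 2 and 0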
| 335 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
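# Hedged end-to-end sketch of the processor under test (checkpoint and model
# class names assumed from the transformers MGP-STR docs):
# from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=image, return_tensors="pt").pixel_values
# outputs = model(pixel_values)
# text = processor.batch_decode(outputs.logits)["generated_text"]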
| 335 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : List[str] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 |
"""simple docstring"""
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
    # handle negative values of the initial intensity
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
    # handle angle values outside the allowed range of 0-360 degrees
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
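    # Worked check (sketch): Malus' law gives I = I0 * cos^2(theta), so an
    # analyser at 60 degrees passes one quarter of a 100-unit beam.
    assert math.isclose(100 * math.cos(math.radians(60)) ** 2, 25.0)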
| 335 | 1 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _snake_case ( UpperCAmelCase_ : str = "AAPL" ):
A__ = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
A__ = BeautifulSoup(requests.get(UpperCAmelCase_ ).text , """html.parser""" )
A__ = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 335 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
SCREAMING_SNAKE_CASE_ : Tuple = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
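# How MAPPING is applied below: a fairseq parameter such as
#   "encoder.layers.3.self_attn.k_proj.weight"
# matches the "self_attn.k_proj" entry; the layer index "3" is recovered from
# the original name, substituted for the "*" wildcard, and the weight lands at
#   "encoder.layers.3.attention.k_proj" with weight_type "weight".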
def _snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] ):
for attribute in key.split(""".""" ):
A__ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if weight_type is not None:
A__ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
else:
A__ = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
A__ = value
elif weight_type == "weight_g":
A__ = value
elif weight_type == "weight_v":
A__ = value
elif weight_type == "bias":
A__ = value
else:
A__ = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ):
A__ = []
A__ = fairseq_model.state_dict()
A__ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
A__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , )
A__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
A__ = True
if "*" in mapped_key:
A__ = name.split(UpperCAmelCase_ )[0].split(""".""" )[-2]
A__ = mapped_key.replace("""*""" , UpperCAmelCase_ )
if "weight_g" in name:
A__ = """weight_g"""
elif "weight_v" in name:
A__ = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
A__ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A__ = """weight"""
else:
A__ = None
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict ):
A__ = full_name.split("""conv_layers.""" )[-1]
A__ = name.split(""".""" )
A__ = int(items[0] )
A__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
A__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
A__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
A__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
A__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase_ )
@torch.no_grad()
def _snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=None ):
# load the pre-trained checkpoints
A__ = torch.load(UpperCAmelCase_ )
A__ = WavLMConfigOrig(checkpoint["""cfg"""] )
A__ = WavLMOrig(UpperCAmelCase_ )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
A__ = WavLMConfig.from_pretrained(UpperCAmelCase_ )
else:
A__ = WavLMConfig()
A__ = WavLMModel(UpperCAmelCase_ )
recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ )
hf_wavlm.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_ : List[str] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
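# Example invocation (all file and folder names below are placeholders):
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-hf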
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
A__ = time.time()
locka.acquire(UpperCAmelCase_ )
assert time.time() - _start > timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
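    # "a" * 1000 + ".lock" is 1005 characters, well past the usual 255-byte
    # filename limit, so FileLock must shorten the basename to at most 255
    # characters while keeping the ".lock" suffix.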
A__ = """a""" * 1000 + """.lock"""
A__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
locka.acquire(0 )
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = 0
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
A__ = Path(UpperCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase , """w""" ) )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
A__ = Path(UpperCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase , """w""" ) )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = CLIPConfig()
            # Create a dummy config file with image_processor_type
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
A__ = Path(UpperCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase ).to_dict()
config_dict.pop("""image_processor_type""" )
A__ = CLIPImageProcessor(**UpperCamelCase )
# save in new folder
model_config.save_pretrained(UpperCamelCase )
config.save_pretrained(UpperCamelCase )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
# make sure private variable is not incorrectly saved
A__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase , """clip-base is not a local folder and is not a valid model identifier""" ):
A__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase , revision="""aaaaaa""" )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
A__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
with self.assertRaises(UpperCamelCase ):
A__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase ):
A__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase )
A__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase , trust_remote_code=UpperCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , UpperCamelCase )
AutoImageProcessor.register(UpperCamelCase , UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase ):
AutoImageProcessor.register(UpperCamelCase , UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
A__ = Path(UpperCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase , """w""" ) )
A__ = CustomImageProcessor.from_pretrained(UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = True
try:
AutoConfig.register("""custom""" , UpperCamelCase )
AutoImageProcessor.register(UpperCamelCase , UpperCamelCase )
# If remote code is not set, the default is to use local
A__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
A__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
A__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(UpperCamelCase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "dandelin/vilt-b32-finetuned-vqa"
UpperCAmelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
UpperCAmelCase = "image_qa"
UpperCAmelCase = AutoProcessor
UpperCAmelCase = AutoModelForVisualQuestionAnswering
UpperCAmelCase = ["image", "text"]
UpperCAmelCase = ["text"]
def __init__( self: List[str] , *UpperCamelCase: Dict , **UpperCamelCase: List[str] ):
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: "Image" , UpperCamelCase: str ):
"""simple docstring"""
return self.pre_processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
def UpperCamelCase ( self: str , UpperCamelCase: str ):
"""simple docstring"""
with torch.no_grad():
return self.model(**UpperCamelCase ).logits
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
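# Usage sketch (hedged): the three methods above correspond to PipelineTool's
# encode / forward / decode hooks, so, assuming the base class wires them up
# through __call__, the tool answers a question about a PIL image in one call:
#     tool = a()  # class name as defined above
#     answer = tool(image, "What is shown in the picture?")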
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a :
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: Any , UpperCamelCase: Any=13 , UpperCamelCase: int=7 , UpperCamelCase: Tuple=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=True , UpperCamelCase: str=True , UpperCamelCase: Dict=99 , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[Any]=2 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: Dict=37 , UpperCamelCase: Tuple="gelu" , UpperCamelCase: str=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Any=5_12 , UpperCamelCase: Tuple=16 , UpperCamelCase: int=2 , UpperCamelCase: Tuple=0.02 , UpperCamelCase: str=3 , UpperCamelCase: Any=4 , UpperCamelCase: Optional[Any]=None , ):
"""simple docstring"""
A__ = parent
A__ = 13
A__ = 7
A__ = True
A__ = True
A__ = True
A__ = True
A__ = 99
A__ = 3_84
A__ = 2
A__ = 4
A__ = 37
A__ = """gelu"""
A__ = 0.1
A__ = 0.1
A__ = 5_12
A__ = 16
A__ = 2
A__ = 0.02
A__ = 3
A__ = 4
A__ = 1_28
A__ = 2
A__ = 9
A__ = 1
A__ = None
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: str , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: List[str] , UpperCamelCase: str ):
"""simple docstring"""
A__ = TFConvBertModel(config=UpperCamelCase )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ = [input_ids, input_mask]
A__ = model(UpperCamelCase )
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Tuple , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: Dict , UpperCamelCase: Any ):
"""simple docstring"""
A__ = TFConvBertForMaskedLM(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Any , UpperCamelCase: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFConvBertForSequenceClassification(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
A__ = self.num_choices
A__ = TFConvBertForMultipleChoice(config=UpperCamelCase )
A__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A__ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Any ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFConvBertForTokenClassification(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: str , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: str , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = TFConvBertForQuestionAnswering(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ , A__ , A__ , A__ = config_and_inputs
A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = TFConvBertModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = True
if hasattr(UpperCamelCase , """use_cache""" ):
A__ = True
A__ = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
A__ = getattr(self.model_tester , """key_length""" , UpperCamelCase )
for model_class in self.all_model_classes:
A__ = self._prepare_for_class(UpperCamelCase , UpperCamelCase )
A__ = model_class(UpperCamelCase )
A__ = len(model(UpperCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase , saved_model=UpperCamelCase )
A__ = os.path.join(UpperCamelCase , """saved_model""" , """1""" )
A__ = tf.keras.models.load_model(UpperCamelCase )
A__ = model(UpperCamelCase )
if self.is_encoder_decoder:
A__ = outputs["""encoder_hidden_states"""]
A__ = outputs["""encoder_attentions"""]
else:
A__ = outputs["""hidden_states"""]
A__ = outputs["""attentions"""]
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
A__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
A__ = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
A__ = getattr(self.model_tester , """key_length""" , UpperCamelCase )
A__ = getattr(self.model_tester , """key_length""" , UpperCamelCase )
def check_decoder_attentions_output(UpperCamelCase: Union[str, Any] ):
A__ = len(UpperCamelCase )
self.assertEqual(out_len % 2 , 0 )
A__ = outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCamelCase: Tuple ):
A__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = model_class(UpperCamelCase )
A__ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = len(UpperCamelCase )
self.assertEqual(config.output_hidden_states , UpperCamelCase )
check_encoder_attentions_output(UpperCamelCase )
if self.is_encoder_decoder:
A__ = model_class(UpperCamelCase )
A__ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase )
check_decoder_attentions_output(UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCamelCase )
A__ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase )
check_encoder_attentions_output(UpperCamelCase )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(UpperCamelCase )
A__ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase )
check_encoder_attentions_output(UpperCamelCase )
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(UpperCamelCase )[0]
A__ = [1, 6, 7_68]
self.assertEqual(output.shape , UpperCamelCase )
A__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase , atol=1e-4 )
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ):
"""simple docstring"""
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ):
"""simple docstring"""
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["""shortest_edge"""] * h / w )
A__ = self.size["""shortest_edge"""]
elif w > h:
A__ = self.size["""shortest_edge"""]
A__ = int(self.size["""shortest_edge"""] * w / h )
else:
A__ = self.size["""shortest_edge"""]
A__ = self.size["""shortest_edge"""]
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: Optional[Any]=None , UpperCamelCase: List[str]=None ):
"""simple docstring"""
A__ = list(poly_a or [0] )[:]
A__ = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
A__ = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
A__ = len(self.polyB )
# Add 0 to make lengths equal a power of 2
A__ = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
A__ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
A__ = self.__multiply()
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: int ):
"""simple docstring"""
A__ = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(UpperCamelCase ) <= 1:
return dft[0]
#
A__ = self.c_max_length // 2
while next_ncol > 0:
A__ = [[] for i in range(UpperCamelCase )]
A__ = self.root**next_ncol
# First half of next step
A__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCamelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
A__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCamelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
A__ = new_dft
A__ = next_ncol // 2
return dft[0]
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.__dft("""A""" )
A__ = self.__dft("""B""" )
A__ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
A__ = 2
while next_ncol <= self.c_max_length:
A__ = [[] for i in range(UpperCamelCase )]
A__ = self.root ** (next_ncol // 2)
A__ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
A__ = new_inverse_c
next_ncol *= 2
# Unpack
A__ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self: str ):
"""simple docstring"""
A__ = """A = """ + """ + """.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
A__ = """B = """ + """ + """.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
A__ = """A*B = """ + """ + """.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
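    # Illustrative multiplication (passing the two coefficient lists
    # positionally): (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3.
    print(a([1, 2, 3], [4, 5]))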
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : Dict ): # noqa: E741
A__ = len(UpperCAmelCase_ )
A__ = 0
A__ = [0] * n
A__ = [False] * n
A__ = [False] * n
def dfs(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
if parent == root:
out_edge_count += 1
A__ = True
A__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
A__ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
A__ = True
# AP found via cycle
if at == low[to]:
A__ = True
else:
A__ = min(low[at] , UpperCAmelCase_ )
return out_edge_count
for i in range(UpperCAmelCase_ ):
if not visited[i]:
A__ = 0
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , -1 , UpperCAmelCase_ )
A__ = out_edge_count > 1
for x in range(len(UpperCAmelCase_ ) ):
if is_art[x] is True:
print(UpperCAmelCase_ )
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ : Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
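# For the adjacency list above, vertices 2, 3 and 5 are articulation points,
# so compute_ap prints 2, 3 and 5 (one per line).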
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
SCREAMING_SNAKE_CASE_ : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[int, int]
class a :
"""simple docstring"""
def __init__( self: Dict , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Node | None , ):
"""simple docstring"""
A__ = pos_x
A__ = pos_y
A__ = (pos_y, pos_x)
A__ = goal_x
A__ = goal_y
A__ = g_cost
A__ = parent
A__ = self.calculate_heuristic()
A__ = self.g_cost + self.h_cost
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.pos_x - self.goal_x
A__ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(UpperCamelCase ) + abs(UpperCamelCase )
else:
return sqrt(dy**2 + dx**2 )
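    # e.g. from (y=0, x=0) toward goal (6, 6): the Manhattan heuristic gives
    # 12, the Euclidean one sqrt(72) ~ 8.49.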
def __lt__( self: List[str] , UpperCamelCase: Node ):
"""simple docstring"""
return self.f_cost < other.f_cost
class a :
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: TPosition , UpperCamelCase: TPosition ):
"""simple docstring"""
A__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase )
A__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , UpperCamelCase )
A__ = [self.start]
A__ = []
A__ = False
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
A__ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(UpperCamelCase )
self.closed_nodes.append(UpperCamelCase )
A__ = self.get_successors(UpperCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCamelCase )
else:
# retrieve the best current path
A__ = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCamelCase )
else:
self.open_nodes.append(UpperCamelCase )
return [self.start.pos]
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Node ):
"""simple docstring"""
A__ = []
for action in delta:
A__ = parent.pos_x + action[1]
A__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCamelCase , UpperCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase , ) )
return successors
def UpperCamelCase ( self: Dict , UpperCamelCase: Node | None ):
"""simple docstring"""
A__ = node
A__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A__ = current_node.parent
path.reverse()
return path
class a :
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: TPosition , UpperCamelCase: TPosition ):
"""simple docstring"""
A__ = AStar(UpperCamelCase , UpperCamelCase )
A__ = AStar(UpperCamelCase , UpperCamelCase )
A__ = False
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
A__ = self.fwd_astar.open_nodes.pop(0 )
A__ = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
UpperCamelCase , UpperCamelCase )
self.fwd_astar.closed_nodes.append(UpperCamelCase )
self.bwd_astar.closed_nodes.append(UpperCamelCase )
A__ = current_bwd_node
A__ = current_fwd_node
A__ = {
self.fwd_astar: self.fwd_astar.get_successors(UpperCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(UpperCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(UpperCamelCase )
else:
# retrieve the best current path
A__ = astar.open_nodes.pop(
astar.open_nodes.index(UpperCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(UpperCamelCase )
else:
astar.open_nodes.append(UpperCamelCase )
return [self.fwd_astar.start.pos]
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Node , UpperCamelCase: Node ):
"""simple docstring"""
A__ = self.fwd_astar.retrace_path(UpperCamelCase )
A__ = self.bwd_astar.retrace_path(UpperCamelCase )
bwd_path.pop()
bwd_path.reverse()
A__ = fwd_path + bwd_path
return path
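# Note on the bidirectional variant above: each iteration pops the best node
# from both frontiers, retargets each search at the other's current node, and
# stops when the frontiers meet. A hedged sketch mirroring the AStar usage:
#
#   bidir_astar = BidirectionalAStar((0, 0), (len(grid) - 1, len(grid[0]) - 1))
#   path = bidir_astar.search()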
if __name__ == "__main__":
# all coordinates are given in format [y,x]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (0, 0)
SCREAMING_SNAKE_CASE_ : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
SCREAMING_SNAKE_CASE_ : Any = time.time()
SCREAMING_SNAKE_CASE_ : Optional[int] = AStar(init, goal)
SCREAMING_SNAKE_CASE_ : Dict = a_star.search()
SCREAMING_SNAKE_CASE_ : int = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
SCREAMING_SNAKE_CASE_ : Tuple = time.time()
    SCREAMING_SNAKE_CASE_ : Tuple = BidirectionalAStar(init, goal)
    # run the bidirectional search so the timing below measures actual work
    SCREAMING_SNAKE_CASE_ : Dict = bidir_astar.search()
    SCREAMING_SNAKE_CASE_ : Any = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 335 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
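# Hedged takeaway from the tests above: diffusers' `get_activation` maps plain
# strings to torch.nn modules, and "swish" / "silu" both resolve to nn.SiLU:
#
#   assert isinstance(get_activation("mish"), nn.Mish)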
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int ):
A__ = list(range(len(UpperCAmelCase_ ) ) )
A__ = [v / w for v, w in zip(UpperCAmelCase_ , UpperCAmelCase_ )]
    index.sort(key=lambda i: ratio[i] , reverse=UpperCAmelCase_ )
A__ = 0
A__ = [0] * len(UpperCAmelCase_ )
for i in index:
if weight[i] <= capacity:
A__ = 1
max_value += value[i]
capacity -= weight[i]
else:
A__ = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
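# Illustrative call (hypothetical numbers; assumes the obfuscated parameter
# names above stand in for the original value/weight/capacity signature of the
# fractional knapsack): items worth [60, 100, 120] weighing [10, 20, 30] with
# capacity 50 yield a max value of 240.0.
#
#   max_value, fractions = _snake_case([60, 100, 120], [10, 20, 30], 50)
#   assert max_value == 240.0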
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Union[str, Any] , UpperCamelCase: Any ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = RobertaEmbeddings(UpperCamelCase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = config.num_hidden_layers
A__ = DeeRobertaModel(UpperCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: str=None , UpperCamelCase: str=None , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , UpperCamelCase: List[Any]=None , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[int]=-1 , UpperCamelCase: Optional[Any]=False , ):
"""simple docstring"""
A__ = self.num_layers
try:
A__ = self.roberta(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , position_ids=UpperCamelCase , head_mask=UpperCamelCase , inputs_embeds=UpperCamelCase , )
A__ = outputs[1]
A__ = self.dropout(UpperCamelCase )
A__ = self.classifier(UpperCamelCase )
A__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A__ = e.message
A__ = e.exit_layer
A__ = outputs[0]
if not self.training:
A__ = entropy(UpperCamelCase )
A__ = []
A__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
A__ = []
for highway_exit in outputs[-1]:
A__ = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase )
if train_highway:
A__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A__ = (loss,) + outputs
if not self.training:
A__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
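# Hedged reading of the forward pass above: each "highway exit" is an early
# classification head; outside training, per-exit entropies are returned so a
# caller can stop at the first confident layer, and a non-negative
# `output_layer` swaps that exit's logits into position 1 of the outputs, e.g.:
#
#   outputs = model(input_ids, labels=labels, output_layer=3)  # illustrative
#   logits = outputs[1]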
| 335 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class a :
"""simple docstring"""
def __init__( self: int , UpperCamelCase: List[Any] , UpperCamelCase: List[str]=3 , UpperCamelCase: str=32 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Any=10 , UpperCamelCase: Tuple=[8, 16, 32, 64] , UpperCamelCase: Tuple=[1, 1, 2, 1] , UpperCamelCase: Tuple=True , UpperCamelCase: Tuple=True , UpperCamelCase: Union[str, Any]="relu" , UpperCamelCase: Any=3 , UpperCamelCase: Optional[int]=None , UpperCamelCase: int=["stage2", "stage3", "stage4"] , UpperCamelCase: Union[str, Any]=[2, 3, 4] , UpperCamelCase: Tuple=1 , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = embeddings_size
A__ = hidden_sizes
A__ = depths
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = num_labels
A__ = scope
A__ = len(UpperCamelCase )
A__ = out_features
A__ = out_indices
A__ = num_groups
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: str ):
"""simple docstring"""
A__ = BitModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = self.num_labels
A__ = BitForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Tuple , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A__ = None
A__ = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = BitModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
return
@unittest.skip(reason="""Bit does not output attentions""" )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
pass
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(config=UpperCamelCase )
for name, module in model.named_modules():
if isinstance(UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple , UpperCamelCase: Tuple ):
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A__ = layer_type
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
pass
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = BitModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def _snake_case ( ):
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
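# prepare_img (obfuscated to _snake_case above) loads the shared COCO cats
# fixture; a hedged integration sketch, assuming the first archive entry is a
# valid checkpoint id:
#
#   image = _snake_case()
#   processor = BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   inputs = processor(images=image, return_tensors="pt")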
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self: Any ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
A__ = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@require_torch
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = BitModelTester(self )
| 335 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
SCREAMING_SNAKE_CASE_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _snake_case ( ):
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _snake_case ( ):
A__ = """rougeLsum"""
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _snake_case ( ):
A__ = ["""rouge1""", """rouge2""", """rougeL"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
assert score_sep == score_no_sep
def _snake_case ( ):
A__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ )
def _snake_case ( ):
A__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase_ )["""rougeLsum"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _snake_case ( ):
A__ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A__ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
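# Hedged summary of the helpers above: with bootstrap aggregation enabled,
# `calculate_rouge` returns a dict keyed by rouge metric; the rougeLsum tests
# show that `newline_sep` changes sentence splitting and hence the score:
#
#   scores = calculate_rouge(pred_lines, target_lines, rouge_keys=["rouge2"])
#   assert isinstance(scores, dict)  # pred_lines/target_lines as defined above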
| 335 | 1 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_ : str = logging.getLogger()
def _snake_case ( UpperCAmelCase_ : Path , UpperCAmelCase_ : list ):
A__ = """\n""".join(UpperCAmelCase_ )
Path(UpperCAmelCase_ ).open("""w""" ).writelines(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = 'patrickvonplaten/t5-tiny-random'
SCREAMING_SNAKE_CASE_ : List[str] = 'sshleifer/bart-tiny-random'
SCREAMING_SNAKE_CASE_ : int = 'sshleifer/tiny-mbart'
SCREAMING_SNAKE_CASE_ : List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class a ( _lowerCamelCase ):
"""simple docstring"""
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
A__ = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
A__ = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(UpperCamelCase , UpperCamelCase )
A__ = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
A__ = """translation_en_to_de""" if model == T5_TINY else """summarization"""
A__ = f"""
            run_eval.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(UpperCamelCase , """argv""" , UpperCamelCase ):
run_generate()
assert Path(UpperCamelCase ).exists()
# os.remove(Path(output_file_name))
def UpperCamelCase ( self: int ):
"""simple docstring"""
self.run_eval_tester(UpperCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def UpperCamelCase ( self: str , UpperCamelCase: str ):
"""simple docstring"""
self.run_eval_tester(UpperCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
A__ = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
A__ = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
A__ = Path(self.get_auto_remove_tmp_dir() )
A__ = str(tmp_dir / """scores.json""" )
A__ = str(tmp_dir / """val.target""" )
_dump_articles(UpperCamelCase , text["""en"""] )
_dump_articles(UpperCamelCase , text["""de"""] )
A__ = """translation_en_to_de""" if model == T5_TINY else """summarization"""
A__ = f"""
run_eval_search.py
{model}
{str(UpperCamelCase )}
{str(UpperCamelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(UpperCamelCase , """argv""" , UpperCamelCase ):
with CaptureStdout() as cs:
run_search()
A__ = [""" num_beams | length_penalty""", model, """Best score args"""]
A__ = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(UpperCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(UpperCamelCase ).exists()
os.remove(Path(UpperCamelCase ) )
| 335 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None ):
A__ = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = model.mobilenet_va
else:
A__ = model
A__ = """MobilenetV1/Conv2d_0/"""
A__ = backbone.conv_stem.convolution.weight
A__ = backbone.conv_stem.normalization.bias
A__ = backbone.conv_stem.normalization.weight
A__ = backbone.conv_stem.normalization.running_mean
A__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A__ = i + 1
A__ = i * 2
A__ = backbone.layer[pt_index]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
A__ = backbone.layer[pt_index + 1]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
A__ = model.classifier.weight
A__ = model.classifier.bias
return tf_to_pt_map
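# Hedged note on the map above: MobileNetV1 has 13 depthwise-separable blocks,
# stored in PyTorch as alternating layer[2*i] (depthwise) / layer[2*i + 1]
# (pointwise) modules and matched to the TF scopes
# MobilenetV1/Conv2d_{i+1}_depthwise / MobilenetV1/Conv2d_{i+1}_pointwise.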
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
# Build TF to PyTorch weights loading map
A__ = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
A__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
A__ = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
A__ = array.squeeze().transpose()
else:
A__ = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
A__ = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp""" , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp_1""" , UpperCAmelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , UpperCAmelCase_ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def _snake_case ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : nn.Convad ):
A__ , A__ = features.shape[-2:]
A__ , A__ = conv_layer.stride
A__ , A__ = conv_layer.kernel_size
if in_height % stride_height == 0:
A__ = max(kernel_height - stride_height , 0 )
else:
A__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
A__ = max(kernel_width - stride_width , 0 )
else:
A__ = max(kernel_width - (in_width % stride_width) , 0 )
A__ = pad_along_width // 2
A__ = pad_along_width - pad_left
A__ = pad_along_height // 2
A__ = pad_along_height - pad_top
A__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , """constant""" , 0.0 )
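# Worked example for the TF "SAME" padding helper above: a 224x224 input with
# stride 2 and kernel 3 gives 224 % 2 == 0, so total pad = max(3 - 2, 0) = 1
# per axis, split as (left, right, top, bottom) = (0, 1, 0, 1).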
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
A__ = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode="""zeros""" , )
if use_normalization:
A__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
A__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
A__ = ACTaFN[config.hidden_act]
else:
A__ = config.hidden_act
else:
A__ = None
def UpperCamelCase ( self: List[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
A__ = apply_tf_padding(UpperCamelCase , self.convolution )
A__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
A__ = self.normalization(UpperCamelCase )
if self.activation is not None:
A__ = self.activation(UpperCamelCase )
return features
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = MobileNetVaConfig
UpperCAmelCase = load_tf_weights_in_mobilenet_va
UpperCAmelCase = "mobilenet_v1"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = False
def UpperCamelCase ( self: Any , UpperCamelCase: Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
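# Hedged end-to-end sketch (checkpoint id taken from the docstring constants
# above; class names follow the public transformers API, not the anonymized
# names in this snippet):
#
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   logits = model(**processor(images=image, return_tensors="pt")).logits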
| 335 | 1 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
SCREAMING_SNAKE_CASE_ : Dict = sys.version_info >= (3, 1_0)
def _snake_case ( UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=None ):
return field(default_factory=lambda: default , metadata=UpperCAmelCase_ )
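# `list_field` (obfuscated to _snake_case above) exists because mutable
# dataclass defaults must go through default_factory; a hedged sketch:
#
#   @dataclass
#   class WithList:
#       xs: List[int] = _snake_case(default=[1, 2])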
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = 4_2
UpperCAmelCase = field(default="toto", metadata={"help": "help message"} )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = None
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "titi"
UpperCAmelCase = "toto"
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "titi"
UpperCAmelCase = "toto"
UpperCAmelCase = 4_2
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = "toto"
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = BasicEnum(self.foo )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = "toto"
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = MixedTypeEnum(self.foo )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = field(default=_lowerCamelCase, metadata={"help": "help message"} )
UpperCAmelCase = None
UpperCAmelCase = list_field(default=[] )
UpperCAmelCase = list_field(default=[] )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = list_field(default=[] )
UpperCAmelCase = list_field(default=[1, 2, 3] )
UpperCAmelCase = list_field(default=["Hallo", "Bonjour", "Hello"] )
UpperCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field()
UpperCAmelCase = field()
UpperCAmelCase = field()
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = BasicEnum(self.required_enum )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = field()
UpperCAmelCase = None
UpperCAmelCase = field(default="toto", metadata={"help": "help message"} )
UpperCAmelCase = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = None
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = field(default=_lowerCamelCase, metadata={"help": "help message"} )
UpperCAmelCase = None
UpperCAmelCase = list_field(default=[] )
UpperCAmelCase = list_field(default=[] )
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: List[Any] , UpperCamelCase: argparse.ArgumentParser , UpperCamelCase: argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
A__ = {k: v for k, v in vars(UpperCamelCase ).items() if k != """container"""}
A__ = {k: v for k, v in vars(UpperCamelCase ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , UpperCamelCase ) and yy.get("""choices""" , UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](UpperCamelCase ) , yy["""type"""](UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCamelCase , required=UpperCamelCase )
expected.add_argument("""--bar""" , type=UpperCamelCase , required=UpperCamelCase )
expected.add_argument("""--baz""" , type=UpperCamelCase , required=UpperCamelCase )
expected.add_argument("""--flag""" , type=UpperCamelCase , default=UpperCamelCase , const=UpperCamelCase , nargs="""?""" )
self.argparsersEqual(UpperCamelCase , UpperCamelCase )
A__ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((A__) , ) = parser.parse_args_into_dataclasses(UpperCamelCase , look_for_args_file=UpperCamelCase )
self.assertFalse(example.flag )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=UpperCamelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCamelCase , help="""help message""" )
self.argparsersEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCamelCase , default=UpperCamelCase , const=UpperCamelCase , nargs="""?""" )
expected.add_argument("""--baz""" , type=UpperCamelCase , default=UpperCamelCase , const=UpperCamelCase , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCamelCase , dest="""baz""" )
expected.add_argument("""--opt""" , type=UpperCamelCase , default=UpperCamelCase )
A__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCamelCase )
for dataclass_type in dataclass_types:
A__ = HfArgumentParser(UpperCamelCase )
self.argparsersEqual(UpperCamelCase , UpperCamelCase )
A__ = parser.parse_args([] )
self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
A__ = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
A__ = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
A__ = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
A__ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCamelCase , UpperCamelCase )
A__ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
A__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
A__ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
A__ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
A__ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
A__ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = "toto"
A__ = HfArgumentParser(UpperCamelCase )
A__ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCamelCase , UpperCamelCase )
A__ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
A__ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
A__ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCamelCase )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCamelCase )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCamelCase )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCamelCase )
self.argparsersEqual(UpperCamelCase , UpperCamelCase )
A__ = parser.parse_args([] )
self.assertEqual(
UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
A__ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=UpperCamelCase , type=UpperCamelCase )
expected.add_argument("""--bar""" , default=UpperCamelCase , type=UpperCamelCase , help="""help message""" )
expected.add_argument("""--baz""" , default=UpperCamelCase , type=UpperCamelCase )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCamelCase )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCamelCase )
A__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCamelCase )
for dataclass_type in dataclass_types:
A__ = HfArgumentParser(UpperCamelCase )
self.argparsersEqual(UpperCamelCase , UpperCamelCase )
A__ = parser.parse_args([] )
self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , bar=UpperCamelCase , baz=UpperCamelCase , ces=[] , des=[] ) )
A__ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(UpperCamelCase , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCamelCase , required=UpperCamelCase )
expected.add_argument("""--required_str""" , type=UpperCamelCase , required=UpperCamelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCamelCase , )
self.argparsersEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCamelCase , required=UpperCamelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCamelCase , )
expected.add_argument("""--opt""" , type=UpperCamelCase , default=UpperCamelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCamelCase , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCamelCase )
self.argparsersEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
A__ = parser.parse_dict(UpperCamelCase )[0]
A__ = BasicExample(**UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(UpperCamelCase , parser.parse_dict , UpperCamelCase , allow_extra_keys=UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = os.path.join(UpperCamelCase , """temp_json""" )
os.mkdir(UpperCamelCase )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(UpperCamelCase , UpperCamelCase )
            A__ = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0]
A__ = BasicExample(**UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
A__ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = os.path.join(UpperCamelCase , """temp_yaml""" )
os.mkdir(UpperCamelCase )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(UpperCamelCase , UpperCamelCase )
A__ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
A__ = BasicExample(**UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = HfArgumentParser(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
| 335 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : set ):
A__ , A__ = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
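# Hedged usage sketch: counting all simple paths from the top-left to the
# bottom-right of an open 2x2 grid gives 2 (right-then-down and down-then-right):
#
#   assert _snake_case([[0, 0], [0, 0]], 0, 0, set()) == 2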
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Any = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self: Optional[int] , UpperCamelCase: Dict=5_02_65 , UpperCamelCase: List[Any]=10_24 , UpperCamelCase: Dict=12 , UpperCamelCase: List[str]=16 , UpperCamelCase: Union[str, Any]=40_96 , UpperCamelCase: List[Any]="gelu" , UpperCamelCase: List[str]=5_12 , UpperCamelCase: Any=0.1 , UpperCamelCase: int=0.0 , UpperCamelCase: Dict=0.0 , UpperCamelCase: Any=2 , UpperCamelCase: str=0.02 , UpperCamelCase: int=0.0 , UpperCamelCase: int=True , UpperCamelCase: List[str]=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Any=1 , UpperCamelCase: List[str]=0 , UpperCamelCase: List[Any]=2 , **UpperCamelCase: Tuple , ):
"""simple docstring"""
A__ = vocab_size
A__ = d_model
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = decoder_ffn_dim
A__ = activation_function
A__ = max_position_embeddings
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = init_std
A__ = decoder_layerdrop
A__ = use_cache
A__ = scale_embedding
A__ = use_learned_position_embeddings
A__ = layernorm_embedding
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , **UpperCamelCase , )
| 335 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : int = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = ["image_processor", "tokenizer"]
UpperCAmelCase = "AutoImageProcessor"
UpperCAmelCase = "AutoTokenizer"
def __init__( self: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
super().__init__(UpperCamelCase , UpperCamelCase )
A__ = self.image_processor
def __call__( self: Optional[int] , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , **UpperCamelCase: List[str] ):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
A__ = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
A__ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
A__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase ( self: Optional[int] , *UpperCamelCase: int , **UpperCamelCase: str ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: str , *UpperCamelCase: str , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"]
| 335 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _snake_case ( kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _snake_case ( gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _snake_case ( gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 335 | 1 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm ( equation: str ):
    operators = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_two = operand_stack.peek()
            operand_stack.pop()
            num_one = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_one , num_two )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 335 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions ( library_or_version: Union[str, Version] , operation: str , requirement_version: str ):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version ( operation: str , version: str ):
    return compare_versions(torch_version , operation , version )
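# Usage sketch (editor addition), assuming STR_OPERATION_TO_FUNC maps the
# usual comparison strings (e.g. ">=", "<", "==") to operator functions:
# is_torch_version(">=", "1.12.0")           # True when installed torch >= 1.12.0
# compare_versions("numpy", ">=", "1.20.0")  # works for any installed package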
| 335 | 1 |
"""simple docstring"""
import math
def is_prime ( number: int ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution ( ratio: float = 0.1 ):
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
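    # Editor's note: with the default ratio of 0.1 this reproduces the known
    # Project Euler #58 answer, i.e. solution() == 26241 -- the spiral side
    # length at which the prime ratio on the diagonals first drops below 10%.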
| 335 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: int = CHRF.CHAR_ORDER , UpperCamelCase: int = CHRF.WORD_ORDER , UpperCamelCase: int = CHRF.BETA , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , ):
"""simple docstring"""
A__ = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
A__ = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
A__ = CHRF(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = sb_chrf.corpus_score(UpperCamelCase , UpperCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 335 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def _snake_case ( UpperCAmelCase_ : Dict ):
for pegasus_name, hf_name in PATTERNS:
A__ = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
return k
def _snake_case ( UpperCAmelCase_ : dict , UpperCAmelCase_ : dict ):
A__ = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase_ )
A__ = PegasusConfig(**UpperCAmelCase_ )
A__ = PegasusForConditionalGeneration(UpperCAmelCase_ )
A__ = torch_model.model.state_dict()
A__ = {}
for k, v in tf_weights.items():
A__ = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
A__ = v.T
A__ = torch.tensor(UpperCAmelCase_ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
A__ = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
A__ = mapping["""shared.weight"""]
A__ = mapping["""shared.weight"""]
A__ = {k: torch.zeros_like(UpperCAmelCase_ ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**UpperCAmelCase_ )
A__ , A__ = torch_model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
A__ = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def _snake_case ( UpperCAmelCase_ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ):
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
A__ = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(UpperCAmelCase_ , desc="""converting tf checkpoint to dict""" ):
A__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
return tf_weights
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
# save tokenizer first
A__ = Path(UpperCAmelCase_ ).parent.name
A__ = task_specific_params[F"""summarization_{dataset}"""]["""max_position_embeddings"""]
A__ = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=UpperCAmelCase_ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase_ )
# convert model
A__ = get_tf_weights_as_numpy(UpperCAmelCase_ )
A__ = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
A__ = task_specific_params
A__ = convert_pegasus(UpperCAmelCase_ , UpperCAmelCase_ )
torch_model.save_pretrained(UpperCAmelCase_ )
A__ = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(UpperCAmelCase_ , Path(UpperCAmelCase_ ) / """pytorch_model.bin""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ : str = parser.parse_args()
if args.save_dir is None:
SCREAMING_SNAKE_CASE_ : int = Path(args.tf_ckpt_path).parent.name
SCREAMING_SNAKE_CASE_ : Any = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
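# Illustrative invocation (editor addition; the script filename is an
# assumption, and the checkpoint path is just the example default used above):
# python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc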
| 335 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , *UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple=None , UpperCamelCase: Tuple=None , **UpperCamelCase: Dict ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
A__ = eval_examples
A__ = post_process_function
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: List[Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: Optional[int] , ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Dict=None , UpperCamelCase: str = "test" , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
        # Temporarily disable metric computation; we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
| 335 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet_upscale
A__ = DDPMScheduler()
A__ = DDIMScheduler(prediction_type="""v_prediction""" )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # make sure here that the PNDM scheduler skips PRK steps
A__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase , low_res_scheduler=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , max_noise_level=3_50 , )
A__ = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = sd_pipe(
[prompt] , image=UpperCamelCase , generator=UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = sd_pipe(
[prompt] , image=UpperCamelCase , generator=UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
A__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
A__ = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet_upscale
A__ = DDPMScheduler()
A__ = DDIMScheduler(prediction_type="""v_prediction""" )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase , low_res_scheduler=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , max_noise_level=3_50 , )
A__ = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
A__ = output.images
assert image.shape[0] == 2
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = sd_pipe(
[prompt] , image=UpperCamelCase , generator=UpperCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
A__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.dummy_cond_unet_upscale
A__ = DDPMScheduler()
A__ = DDIMScheduler(prediction_type="""v_prediction""" )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
A__ = unet.half()
A__ = text_encoder.half()
        # make sure here that the PNDM scheduler skips PRK steps
A__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase , low_res_scheduler=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , max_noise_level=3_50 , )
A__ = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = sd_pipe(
[prompt] , image=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ).images
A__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
A__ = """stabilityai/stable-diffusion-x4-upscaler"""
A__ = StableDiffusionUpscalePipeline.from_pretrained(UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """a cat sitting on a park bench"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
A__ = """stabilityai/stable-diffusion-x4-upscaler"""
A__ = StableDiffusionUpscalePipeline.from_pretrained(
UpperCamelCase , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """a cat sitting on a park bench"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A__ = """stabilityai/stable-diffusion-x4-upscaler"""
A__ = StableDiffusionUpscalePipeline.from_pretrained(
UpperCamelCase , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ = """a cat sitting on a park bench"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , output_type="""np""" , )
A__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 335 |
"""simple docstring"""
class a :
"""simple docstring"""
def __init__( self: Dict ):
"""simple docstring"""
A__ = {}
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase , """ -> """ , """ -> """.join([str(UpperCamelCase ) for j in self.vertex[i]] ) )
def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase )
else:
# else make a new vertex
A__ = [to_vertex]
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: list ):
"""simple docstring"""
A__ = True
print(UpperCamelCase , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 335 | 1 |
"""simple docstring"""
import random
def rabin_miller ( num: int ):
    # Miller-Rabin probabilistic primality test: write num - 1 = 2**t * s
    # with s odd, then check five random witnesses.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num ( num: int ):
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime ( keysize: int = 1024 ):
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 335 |
"""simple docstring"""
def solution ( n: int = 10 ):
    if not isinstance(n , int ) or n < 0:
        raise ValueError("""Invalid input""" )
    modulus = 10**n
    # three-argument pow performs modular exponentiation, yielding the last
    # n digits of 2**7830457 without building the full 2.4-million-digit number
    number = 2_8433 * (pow(2 , 783_0457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 335 | 1 |
"""simple docstring"""
import re
def indian_phone_validator ( phone: str ):
    pat = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
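    # Further hand-checked examples (editor addition):
    print(indian_phone_validator('9876543210'))  # True: bare 10-digit number starting with 9
    print(indian_phone_validator('123-456-7890'))  # False: must start with 7, 8 or 9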
| 335 |
"""simple docstring"""
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_pointa: Pointad , end_pointb: Pointad ):
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab: Vectorad , ac: Vectorad ):
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector ( vector: Vectorad , accuracy: int ):
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear ( a: Pointad , b: Pointad , c: Pointad , accuracy: int = 10 ):
    # `are_collinear` is a descriptive stand-in; the original function name
    # was lost to obfuscation
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
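# Hand-checked usage sketch (editor addition; `are_collinear` is the editor's
# name for the final helper above):
# are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))  # True: all on the line x = y = z
# are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3))  # False: cross product is (1, -2, 1)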
| 335 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "informer"
UpperCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self: List[str] , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[int] = None , UpperCamelCase: str = "student_t" , UpperCamelCase: str = "nll" , UpperCamelCase: int = 1 , UpperCamelCase: List[int] = None , UpperCamelCase: Optional[Union[str, bool]] = "mean" , UpperCamelCase: int = 0 , UpperCamelCase: int = 0 , UpperCamelCase: int = 0 , UpperCamelCase: int = 0 , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: int = 64 , UpperCamelCase: int = 32 , UpperCamelCase: int = 32 , UpperCamelCase: int = 2 , UpperCamelCase: int = 2 , UpperCamelCase: int = 2 , UpperCamelCase: int = 2 , UpperCamelCase: bool = True , UpperCamelCase: str = "gelu" , UpperCamelCase: float = 0.05 , UpperCamelCase: float = 0.1 , UpperCamelCase: float = 0.1 , UpperCamelCase: float = 0.1 , UpperCamelCase: float = 0.1 , UpperCamelCase: int = 1_00 , UpperCamelCase: float = 0.02 , UpperCamelCase: Tuple=True , UpperCamelCase: str = "prob" , UpperCamelCase: int = 5 , UpperCamelCase: bool = True , **UpperCamelCase: List[str] , ):
"""simple docstring"""
A__ = prediction_length
A__ = context_length or prediction_length
A__ = distribution_output
A__ = loss
A__ = input_size
A__ = num_time_features
A__ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
A__ = scaling
A__ = num_dynamic_real_features
A__ = num_static_real_features
A__ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
A__ = cardinality
else:
A__ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
A__ = embedding_dimension
else:
A__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A__ = num_parallel_samples
# Transformer architecture configuration
A__ = input_size * len(self.lags_sequence ) + self._number_of_features
A__ = d_model
A__ = encoder_attention_heads
A__ = decoder_attention_heads
A__ = encoder_ffn_dim
A__ = decoder_ffn_dim
A__ = encoder_layers
A__ = decoder_layers
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = activation_function
A__ = init_std
A__ = use_cache
# Informer
A__ = attention_type
A__ = sampling_factor
A__ = distil
super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 335 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
SCREAMING_SNAKE_CASE_ : List[Any] = (3, 9, -1_1, 0, 7, 5, 1, -1)
SCREAMING_SNAKE_CASE_ : Tuple = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
class a :
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Iterable[int] ):
"""simple docstring"""
A__ = None
for i in sorted(UpperCamelCase , reverse=UpperCamelCase ):
A__ = Node(UpperCamelCase , self.head )
def __iter__( self: Dict ):
"""simple docstring"""
A__ = self.head
while node:
yield node.data
A__ = node.next_node
def __len__( self: List[Any] ):
"""simple docstring"""
return sum(1 for _ in self )
def __str__( self: Union[str, Any] ):
"""simple docstring"""
return " -> ".join([str(UpperCamelCase ) for node in self] )
def _snake_case ( UpperCAmelCase_ : SortedLinkedList , UpperCAmelCase_ : SortedLinkedList ):
return SortedLinkedList(list(UpperCAmelCase_ ) + list(UpperCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 335 |
"""simple docstring"""
import math
class a :
"""simple docstring"""
    def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with nodes 0, 1, ..., N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
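# Editor's note (hand-checked against the edge list above, assuming the
# intended Floyd-Warshall behaviour once the obfuscated assignments are
# restored): show_min(1, 4) should be 11 via 1 -> 3 -> 4, and
# show_min(0, 3) should be 16 via 0 -> 2 -> 3.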
| 335 | 1 |
"""simple docstring"""
def hamming_distance ( string1: str , string2: str ):
    if len(string1 ) != len(string2 ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for chara, charb in zip(string1 , string2 ):
        if chara != charb:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
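    # Hand-checked example (editor addition): the classic pair differs in
    # exactly three positions.
    print(hamming_distance('karolin' , 'kathrin'))  # 3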
| 335 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 1 |
"""simple docstring"""
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_pointa: Pointad , end_pointb: Pointad ):
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab: Vectorad , ac: Vectorad ):
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector ( vector: Vectorad , accuracy: int ):
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear ( a: Pointad , b: Pointad , c: Pointad , accuracy: int = 10 ):
    # `are_collinear` is a descriptive stand-in; the original function name
    # was lost to obfuscation
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
| 335 |
"""simple docstring"""
import math
def malus_law ( initial_intensity: float , angle: float ):
    # reject negative intensity values
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
    # reject angles outside the allowed 0-360 degree range
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
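    # Worked example (editor addition): at 60 degrees, cos(60) = 0.5, so the
    # transmitted intensity is 100 * 0.5**2, i.e. approximately 25.0.
    print(malus_law(100.0 , 60.0))  # ~25.0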
| 335 | 1 |
"""simple docstring"""
class Graph:
    """simple docstring"""
    def __init__(self):
        """simple docstring"""
        self.vertex = {}
    def print_graph(self):
        """simple docstring"""
        print(self.vertex)
        for i in self.vertex:
            print(i, """ -> """, """ -> """.join([str(j) for j in self.vertex[i]]))
    def add_edge(self, from_vertex: int, to_vertex: int):
        """simple docstring"""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs(self):
        """simple docstring"""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)
    def dfs_recursive(self, start_vertex: int, visited: list):
        """simple docstring"""
        visited[start_vertex] = True
        print(start_vertex, end=""" """)
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print('DFS:')
    g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 335 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 335 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = """ylacombe/bark-small"""
A__ = tempfile.mkdtemp()
A__ = """en_speaker_1"""
A__ = """This is a test string"""
A__ = """speaker_embeddings_path.json"""
A__ = """speaker_embeddings"""
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: Dict ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCamelCase )
def UpperCamelCase ( self: int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = BarkProcessor(tokenizer=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
A__ = 35
A__ = 2
A__ = 8
A__ = {
"""semantic_prompt""": np.ones(UpperCamelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
A__ = processor(text=self.input_string , voice_preset=UpperCamelCase )
A__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
A__ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(UpperCamelCase , **UpperCamelCase )
A__ = processor(text=self.input_string , voice_preset=UpperCamelCase )
A__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
A__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = BarkProcessor(tokenizer=UpperCamelCase )
A__ = processor(text=self.input_string )
A__ = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=UpperCamelCase , return_attention_mask=UpperCamelCase , return_token_type_ids=UpperCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 335 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    locka = FileLock(str(tmpdir / """foo.lock""") )
    lockb = FileLock(str(tmpdir / """foo.lock""") )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_filename(tmpdir):
    filename = """a""" * 1000 + """.lock"""
    locka = FileLock(str(tmpdir / filename) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file) ) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
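# A minimal usage sketch outside pytest (hedged; mirrors the tests above): the
# lock is held for the duration of the `with` block, and a competing acquire
# with a short timeout raises `Timeout`.
#
# with FileLock("/tmp/example.lock").acquire(timeout=1):
#     ...  # exclusive section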
| 335 | 1 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
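# Worked example (a hedged sketch; assumes 0 = open cell, 1 = wall): with the
# centre cell blocked, exactly two simple paths join the corners of a 3x3 grid.
# depth_first_search([[0, 0, 0], [0, 1, 0], [0, 0, 0]], 0, 0, set())  # -> 2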
| 335 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    """simple docstring"""
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["""vision"""] )
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image", question: str):
        """simple docstring"""
        return self.pre_processor(image, question, return_tensors="""pt""" )
    def forward(self, inputs):
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs).logits
    def decode(self, outputs):
        """simple docstring"""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
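# Hedged end-to-end sketch (`image` stands for any PIL image; class and method
# names are the reconstructed ones above):
# tool = ImageQuestionAnsweringTool()
# answer = tool.decode(tool.forward(tool.encode(image, "What is on the table?")))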
| 335 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
SCREAMING_SNAKE_CASE_ : Tuple = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self: str , UpperCamelCase: int , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int] ):
"""simple docstring"""
if len(UpperCamelCase ) == 0 or len(UpperCamelCase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(UpperCamelCase ) )
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = [sequences]
A__ = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(UpperCamelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(_lowerCamelCase )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Tuple , UpperCamelCase: Union[str, Any]=ZeroShotClassificationArgumentHandler() , *UpperCamelCase: List[str] , **UpperCamelCase: str ):
"""simple docstring"""
A__ = args_parser
super().__init__(*UpperCamelCase , **UpperCamelCase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: Optional[int]=True , UpperCamelCase: Optional[int]=True , UpperCamelCase: List[str]=TruncationStrategy.ONLY_FIRST , **UpperCamelCase: List[Any] ):
"""simple docstring"""
A__ = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A__ = self.tokenizer.eos_token
try:
A__ = self.tokenizer(
UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , )
except Exception as e:
if "too short" in str(UpperCamelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A__ = self.tokenizer(
UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , padding=UpperCamelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: List[str] ):
"""simple docstring"""
if kwargs.get("""multi_class""" , UpperCamelCase ) is not None:
A__ = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A__ = {}
if "candidate_labels" in kwargs:
A__ = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A__ = kwargs["""hypothesis_template"""]
A__ = {}
if "multi_label" in kwargs:
A__ = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase: Union[str, List[str]] , *UpperCamelCase: Any , **UpperCamelCase: Union[str, Any] , ):
"""simple docstring"""
if len(UpperCamelCase ) == 0:
pass
elif len(UpperCamelCase ) == 1 and "candidate_labels" not in kwargs:
A__ = args[0]
else:
raise ValueError(f"""Unable to understand extra arguments {args}""" )
return super().__call__(UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: Tuple="This example is {}." ):
"""simple docstring"""
A__ , A__ = self._args_parser(UpperCamelCase , UpperCamelCase , UpperCamelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(UpperCamelCase , UpperCamelCase ) ):
A__ = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(UpperCamelCase ) - 1,
**model_input,
}
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Dict ):
"""simple docstring"""
A__ = inputs["""candidate_label"""]
A__ = inputs["""sequence"""]
A__ = {k: inputs[k] for k in self.tokenizer.model_input_names}
A__ = self.model(**UpperCamelCase )
A__ = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def UpperCamelCase ( self: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any]=False ):
"""simple docstring"""
A__ = [outputs["""candidate_label"""] for outputs in model_outputs]
A__ = [outputs["""sequence"""] for outputs in model_outputs]
A__ = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A__ = logits.shape[0]
A__ = len(UpperCamelCase )
A__ = N // n
A__ = logits.reshape((num_sequences, n, -1) )
if multi_label or len(UpperCamelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A__ = self.entailment_id
A__ = -1 if entailment_id == 0 else 0
A__ = reshaped_outputs[..., [contradiction_id, entailment_id]]
A__ = np.exp(UpperCamelCase ) / np.exp(UpperCamelCase ).sum(-1 , keepdims=UpperCamelCase )
A__ = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A__ = reshaped_outputs[..., self.entailment_id]
A__ = np.exp(UpperCamelCase ) / np.exp(UpperCamelCase ).sum(-1 , keepdims=UpperCamelCase )
A__ = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 335 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ):
"""simple docstring"""
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ):
"""simple docstring"""
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["""shortest_edge"""] * h / w )
A__ = self.size["""shortest_edge"""]
elif w > h:
A__ = self.size["""shortest_edge"""]
A__ = int(self.size["""shortest_edge"""] * w / h )
else:
A__ = self.size["""shortest_edge"""]
A__ = self.size["""shortest_edge"""]
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
| 335 | 1 |
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
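# Quick check (hedged): a 90-degree arc of a radius-10 circle is a quarter of
# the circumference, 2 * pi * 10 / 4, roughly 15.708, which the call below prints.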
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 335 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
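# Expected output (hedged, by inspection of the adjacency list above): 2, 3 and 5
# are the articulation points, since removing any one of them disconnects the graph.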
| 335 | 1 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    queue = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 335 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
    def test_swish(self):
        """simple docstring"""
        act = get_activation("""swish""" )
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-1_00, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
    def test_silu(self):
        """simple docstring"""
        act = get_activation("""silu""" )
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-1_00, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
    def test_mish(self):
        """simple docstring"""
        act = get_activation("""mish""" )
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-2_00, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
    def test_gelu(self):
        """simple docstring"""
        act = get_activation("""gelu""" )
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-1_00, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 335 | 1 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version) )
    return operation(library_or_version, parse(requirement_version) )
def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
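# Hedged usage sketch: gate a code path on the installed torch version.
# if is_torch_version(">=", "1.12.0"):
#     ...  # safe to use torch 1.12+ APIs here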
| 335 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Union[str, Any] , UpperCamelCase: Any ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = RobertaEmbeddings(UpperCamelCase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = config.num_hidden_layers
A__ = DeeRobertaModel(UpperCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: str=None , UpperCamelCase: str=None , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , UpperCamelCase: List[Any]=None , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[int]=-1 , UpperCamelCase: Optional[Any]=False , ):
"""simple docstring"""
A__ = self.num_layers
try:
A__ = self.roberta(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , position_ids=UpperCamelCase , head_mask=UpperCamelCase , inputs_embeds=UpperCamelCase , )
A__ = outputs[1]
A__ = self.dropout(UpperCamelCase )
A__ = self.classifier(UpperCamelCase )
A__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A__ = e.message
A__ = e.exit_layer
A__ = outputs[0]
if not self.training:
A__ = entropy(UpperCamelCase )
A__ = []
A__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
A__ = []
for highway_exit in outputs[-1]:
A__ = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase )
if train_highway:
A__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A__ = (loss,) + outputs
if not self.training:
A__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 335 | 1 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_ : Tuple = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'Salesforce/codegen-350M-mono': 2_0_4_8,
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = CodeGenTokenizer
def __init__( self: str , UpperCamelCase: Tuple=None , UpperCamelCase: Dict=None , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[int]="<|endoftext|>" , UpperCamelCase: Dict="<|endoftext|>" , UpperCamelCase: str="<|endoftext|>" , UpperCamelCase: str=False , **UpperCamelCase: Tuple , ):
"""simple docstring"""
super().__init__(
UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , unk_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , )
if kwargs.pop("""add_bos_token""" , UpperCamelCase ):
A__ = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase ) != add_prefix_space:
A__ = getattr(UpperCamelCase , pre_tok_state.pop("""type""" ) )
A__ = add_prefix_space
A__ = pre_tok_class(**UpperCamelCase )
A__ = add_prefix_space
def UpperCamelCase ( self: int , *UpperCamelCase: Union[str, Any] , **UpperCamelCase: int ):
"""simple docstring"""
A__ = kwargs.get("""is_split_into_words""" , UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] , *UpperCamelCase: List[str] , **UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = kwargs.get("""is_split_into_words""" , UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: str , UpperCamelCase: Optional[str] = None ):
"""simple docstring"""
A__ = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
def UpperCamelCase ( self: int , UpperCamelCase: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , UpperCamelCase: bool = False , UpperCamelCase: bool = None , UpperCamelCase: Optional[List[str]] = None , **UpperCamelCase: Any , ):
"""simple docstring"""
A__ = super().decode(
token_ids=UpperCamelCase , skip_special_tokens=UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase , **UpperCamelCase , )
if truncate_before_pattern is not None and len(UpperCamelCase ) > 0:
A__ = self.truncate(UpperCamelCase , UpperCamelCase )
return decoded_text
def UpperCamelCase ( self: int , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
def find_re(UpperCamelCase: List[str] , UpperCamelCase: str , UpperCamelCase: str ):
A__ = pattern.search(UpperCamelCase , UpperCamelCase )
return m.start() if m else -1
A__ = [re.compile(UpperCamelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
A__ = list(re.finditer("""^print""" , UpperCamelCase , re.MULTILINE ) )
if len(UpperCamelCase ) > 1:
A__ = completion[: prints[1].start()]
A__ = list(re.finditer("""^def""" , UpperCamelCase , re.MULTILINE ) )
if len(UpperCamelCase ) > 1:
A__ = completion[: defs[1].start()]
A__ = 0
A__ = [
pos for pos in [find_re(UpperCamelCase , UpperCamelCase , UpperCamelCase ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase ) > 0:
return completion[: min(UpperCamelCase )]
else:
return completion
| 335 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
SCREAMING_SNAKE_CASE_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _snake_case ( ):
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _snake_case ( ):
A__ = """rougeLsum"""
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _snake_case ( ):
A__ = ["""rouge1""", """rouge2""", """rougeL"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
assert score_sep == score_no_sep
def _snake_case ( ):
A__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ )
def _snake_case ( ):
A__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase_ )["""rougeLsum"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _snake_case ( ):
A__ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A__ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
| 335 | 1 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : List[str] = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[Any]=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
A__ , A__ , A__ , A__ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
A__ = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
A__ = config_class.from_json_file(UpperCAmelCase_ )
A__ = True
A__ = True
print(F"""Building TensorFlow model from configuration: {config}""" )
A__ = model_class(UpperCAmelCase_ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
A__ = cached_file(
UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
A__ = load_pytorch_checkpoint_in_tfa_model(UpperCAmelCase_ , UpperCAmelCase_ )
if compare_with_pt_model:
A__ = tf_model(tf_model.dummy_inputs , training=UpperCAmelCase_ ) # build the network
A__ = torch.load(UpperCAmelCase_ , map_location="""cpu""" )
A__ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=UpperCAmelCase_ , config=UpperCAmelCase_ , state_dict=UpperCAmelCase_ )
with torch.no_grad():
A__ = pt_model(**pt_model.dummy_inputs )
A__ = pto[0].numpy()
A__ = tfo[0].numpy()
A__ = np.amax(np.abs(np_pt - np_tf ) )
print(F"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2e-2, F"""Error, model absolute difference is >2e-2: {diff}"""
# Save pytorch-model
print(F"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(UpperCAmelCase_ , save_format="""h5""" )
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[str]=False , ):
if args_model_type is None:
A__ = list(MODEL_CLASSES.keys() )
else:
A__ = [args_model_type]
for j, model_type in enumerate(UpperCAmelCase_ , start=1 ):
print("""=""" * 100 )
print(F""" Converting model type {j}/{len(UpperCAmelCase_ )}: {model_type}""" )
print("""=""" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
A__ , A__ , A__ , A__ , A__ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
A__ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
A__ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(UpperCAmelCase_ , UpperCAmelCase_ ) , start=1 ):
print("""-""" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
A__ = model_shortcut_name
elif only_convert_finetuned_models:
print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
F""" Converting checkpoint {i}/{len(UpperCAmelCase_ )}: {model_shortcut_name} - model_type {model_type}""" )
print("""-""" * 100 )
if config_shortcut_name in aws_config_map:
A__ = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
else:
A__ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
A__ = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
else:
A__ = model_shortcut_name
if os.path.isfile(UpperCAmelCase_ ):
A__ = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=UpperCAmelCase_ , pytorch_checkpoint_path=UpperCAmelCase_ , config_file=UpperCAmelCase_ , tf_dump_path=os.path.join(UpperCAmelCase_ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=UpperCAmelCase_ , )
if remove_cached_files:
os.remove(UpperCAmelCase_ )
os.remove(UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
SCREAMING_SNAKE_CASE_ : List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
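# --- Hedged usage sketch (editor addition) ---
# Typical invocation of this conversion script; the script filename and the
# paths below are placeholders for a local checkout and checkpoint.
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./bert-base-cased/pytorch_model.bin \
#       --config_file ./bert-base-cased/config.json \
#       --tf_dump_path ./tf_dumps \
#       --compare_with_pt_model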
| 335 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None ):
A__ = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = model.mobilenet_va
else:
A__ = model
A__ = """MobilenetV1/Conv2d_0/"""
A__ = backbone.conv_stem.convolution.weight
A__ = backbone.conv_stem.normalization.bias
A__ = backbone.conv_stem.normalization.weight
A__ = backbone.conv_stem.normalization.running_mean
A__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A__ = i + 1
A__ = i * 2
A__ = backbone.layer[pt_index]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
A__ = backbone.layer[pt_index + 1]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
A__ = model.classifier.weight
A__ = model.classifier.bias
return tf_to_pt_map
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
# Build TF to PyTorch weights loading map
A__ = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
A__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
A__ = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
A__ = array.squeeze().transpose()
else:
A__ = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
A__ = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp""" , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp_1""" , UpperCAmelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , UpperCAmelCase_ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def _snake_case ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : nn.Convad ):
A__ , A__ = features.shape[-2:]
A__ , A__ = conv_layer.stride
A__ , A__ = conv_layer.kernel_size
if in_height % stride_height == 0:
A__ = max(kernel_height - stride_height , 0 )
else:
A__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
A__ = max(kernel_width - stride_width , 0 )
else:
A__ = max(kernel_width - (in_width % stride_width) , 0 )
A__ = pad_along_width // 2
A__ = pad_along_width - pad_left
A__ = pad_along_height // 2
A__ = pad_along_height - pad_top
A__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , """constant""" , 0.0 )
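# --- Hedged worked example (editor addition) ---
# The helper above (called `apply_tf_padding` where it is used below) reproduces
# TensorFlow "SAME" padding. For a 7x7 feature map and a 3x3/stride-2
# convolution, total padding is max(3 - 7 % 2, 0) = 2, split (1, 1) per axis:
#   conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
#   x = torch.randn(1, 3, 7, 7)
#   apply_tf_padding(x, conv).shape  # -> torch.Size([1, 3, 9, 9]); conv output is then 4x4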
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
A__ = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode="""zeros""" , )
if use_normalization:
A__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
A__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
A__ = ACTaFN[config.hidden_act]
else:
A__ = config.hidden_act
else:
A__ = None
def UpperCamelCase ( self: List[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
A__ = apply_tf_padding(UpperCamelCase , self.convolution )
A__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
A__ = self.normalization(UpperCamelCase )
if self.activation is not None:
A__ = self.activation(UpperCamelCase )
return features
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = MobileNetVaConfig
UpperCAmelCase = load_tf_weights_in_mobilenet_va
UpperCAmelCase = "mobilenet_v1"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = False
def UpperCamelCase ( self: Any , UpperCamelCase: Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
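# --- Hedged usage sketch (editor addition) ---
# The classes above are name-mangled (each is `class a`), so this demo targets
# the upstream transformers classes this file mirrors; it assumes network
# access to the Hub and that Pillow and requests are installed.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # highest-scoring ImageNet class, e.g. "tabby, tabby cat"
    print(model.config.id2label[logits.argmax(-1).item()])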
| 335 | 1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: List[str] , UpperCamelCase: AutoencoderKL , UpperCamelCase: CLIPTextModel , UpperCamelCase: CLIPTokenizer , UpperCamelCase: UNetaDConditionModel , UpperCamelCase: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase: StableDiffusionSafetyChecker , UpperCamelCase: CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , unet=UpperCamelCase , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
self.enable_attention_slicing(UpperCamelCase )
@torch.no_grad()
def __call__( self: Union[str, Any] , UpperCamelCase: Union[str, List[str]] , UpperCamelCase: int = 5_12 , UpperCamelCase: int = 5_12 , UpperCamelCase: int = 50 , UpperCamelCase: float = 7.5 , UpperCamelCase: Optional[Union[str, List[str]]] = None , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: float = 0.0 , UpperCamelCase: Optional[torch.Generator] = None , UpperCamelCase: Optional[torch.FloatTensor] = None , UpperCamelCase: Optional[str] = "pil" , UpperCamelCase: bool = True , UpperCamelCase: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase: int = 1 , UpperCamelCase: Optional[torch.FloatTensor] = None , **UpperCamelCase: Tuple , ):
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = 1
elif isinstance(UpperCamelCase , UpperCamelCase ):
A__ = len(UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase , UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(UpperCamelCase )}.""" )
# get prompt text embeddings
A__ = self.tokenizer(
UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
A__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
A__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A__ , A__ , A__ = text_embeddings.shape
A__ = text_embeddings.repeat(1 , UpperCamelCase , 1 )
A__ = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCamelCase , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A__ = 42
if negative_prompt is None:
A__ = [""""""]
elif type(UpperCamelCase ) is not type(UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase )} !="""
f""" {type(UpperCamelCase )}.""" )
elif isinstance(UpperCamelCase , UpperCamelCase ):
A__ = [negative_prompt]
elif batch_size != len(UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
A__ = negative_prompt
A__ = text_input_ids.shape[-1]
A__ = self.tokenizer(
UpperCamelCase , padding="""max_length""" , max_length=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" , )
A__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A__ = uncond_embeddings.shape[1]
A__ = uncond_embeddings.repeat(UpperCamelCase , UpperCamelCase , 1 )
A__ = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A__ = torch.randn(
UpperCamelCase , generator=UpperCamelCase , device="""cpu""" , dtype=UpperCamelCase ).to(self.device )
A__ = torch.randn(UpperCamelCase , generator=UpperCamelCase , device="""cpu""" , dtype=UpperCamelCase ).to(
self.device )
else:
A__ = torch.randn(
UpperCamelCase , generator=UpperCamelCase , device=self.device , dtype=UpperCamelCase )
A__ = torch.randn(UpperCamelCase , generator=UpperCamelCase , device=self.device , dtype=UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
A__ = latents_reference.to(self.device )
A__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images with the same seed
# but different sizes actually result in similar images
A__ = (latents_shape[3] - latents_shape_reference[3]) // 2
A__ = (latents_shape[2] - latents_shape_reference[2]) // 2
A__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A__ = 0 if dx < 0 else dx
A__ = 0 if dy < 0 else dy
A__ = max(-dx , 0 )
A__ = max(-dy , 0 )
A__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A__ = {}
if accepts_eta:
A__ = eta
for i, t in enumerate(self.progress_bar(UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ = self.scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# predict the noise residual
A__ = self.unet(UpperCamelCase , UpperCamelCase , encoder_hidden_states=UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A__ , A__ = noise_pred.chunk(2 )
A__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A__ = self.scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = 1 / 0.18_215 * latents
A__ = self.vae.decode(UpperCamelCase ).sample
A__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A__ = self.feature_extractor(self.numpy_to_pil(UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
A__ , A__ = self.safety_checker(
images=UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A__ = None
if output_type == "pil":
A__ = self.numpy_to_pil(UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=UpperCamelCase , nsfw_content_detected=UpperCamelCase )
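# --- Hedged usage sketch (editor addition) ---
# This mirrors the community "seed resize" Stable Diffusion pipeline: a
# reference latent made for 512x512 is passed via `latents_reference` at a
# different size so the composition stays similar. Class and base names above
# are mangled, so the sketch assumes the pipeline is importable as
# `SeedResizePipeline`; model id and device are placeholders.
#   pipe = SeedResizePipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
#   generator = torch.Generator("cuda").manual_seed(0)
#   ref = torch.randn((1, pipe.unet.config.in_channels, 64, 64), generator=generator, device="cuda")
#   image = pipe("a photo of an astronaut", height=512, width=768,
#                latents_reference=ref, generator=generator).images[0]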
| 335 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : set ):
A__ , A__ = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
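# --- Hedged restoration sketch (editor addition) ---
# The definition above is name-mangled (duplicate parameter names, mixed
# identifiers), so this is the editor's readable reading of it, plus a demo.
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    rows, cols = len(grid), len(grid[0])
    # out of bounds, already on the current path, or blocked by a wall
    if min(row, col) < 0 or row == rows or col == cols or (row, col) in visit or grid[row][col] == 1:
        return 0
    if row == rows - 1 and col == cols - 1:
        return 1
    visit.add((row, col))
    count = 0
    for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        count += depth_first_search(grid, row + dr, col + dc, visit)
    visit.remove((row, col))  # backtrack so other paths may reuse this cell
    return count


if __name__ == "__main__":
    maze = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]  # 1 marks a wall
    print(depth_first_search(maze, 0, 0, set()))  # 2 simple paths around the wall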
| 335 | 1 |
"""simple docstring"""
import os
import numpy
import onnx
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
A__ = a.name
A__ = b.name
A__ = """"""
A__ = """"""
A__ = a == b
A__ = name_a
A__ = name_b
return res
def _snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase_ , UpperCAmelCase_ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase_ , UpperCAmelCase_ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase_ , UpperCAmelCase_ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase_ , UpperCAmelCase_ )
def _snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ):
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def _snake_case ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ):
A__ = list(model.graph.initializer )
A__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
A__ = inits[i].name
A__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase_ , UpperCAmelCase_ )
def _snake_case ( UpperCAmelCase_ : Union[str, Any] ):
A__ = os.path.dirname(UpperCAmelCase_ )
A__ = os.path.basename(UpperCAmelCase_ )
A__ = onnx.load(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) )
A__ = list(model.graph.initializer )
A__ = set()
A__ = {}
A__ = []
A__ = 0
for i in range(len(UpperCAmelCase_ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase_ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase_ )
dup_set.add(UpperCAmelCase_ )
A__ = inits[j].data_type
A__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , UpperCAmelCase_ )
total_reduced_size += mem_size
A__ = inits[i].name
A__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase_ )
else:
A__ = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
A__ = sorted(UpperCAmelCase_ )
_remove_dup_initializers_from_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
A__ = """optimized_""" + model_file_name
A__ = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
onnx.save(UpperCAmelCase_ , UpperCAmelCase_ )
return new_model
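# --- Hedged usage sketch (editor addition) ---
# In the un-mangled source the helpers above are `_is_equal_tensor_proto`,
# `_node_replace_input_with`, `_graph_replace_input_with`, and
# `_remove_dup_initializers_from_model`; the last definition is the entry
# point (its public name is assumed here). A typical run:
#   new_path = remove_dup_initializers("checkpoints/model.onnx")
#   # prints the total deduplicated size and writes checkpoints/optimized_model.onnx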
| 335 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : int = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
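# --- Hedged usage sketch (editor addition) ---
# In the un-mangled source the last line registers the lazy module via
# `sys.modules[__name__] = _LazyModule(...)`, so attribute access triggers the
# deferred import. Assuming a transformers checkout where this file lives at
# transformers/models/megatron_bert/__init__.py:
#   from transformers.models.megatron_bert import MegatronBertConfig
#   config = MegatronBertConfig(hidden_size=1024, num_attention_heads=16)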
| 335 | 1 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class a :
"""simple docstring"""
def __init__( self: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any]=13 , UpperCamelCase: Optional[Any]=7 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Any=True , UpperCamelCase: List[str]=True , UpperCamelCase: Optional[int]=True , UpperCamelCase: Any=99 , UpperCamelCase: Tuple=64 , UpperCamelCase: Optional[int]=32 , UpperCamelCase: Optional[int]=5 , UpperCamelCase: List[Any]=4 , UpperCamelCase: Dict=37 , UpperCamelCase: Optional[Any]="gelu" , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Tuple=5_12 , UpperCamelCase: Optional[int]=16 , UpperCamelCase: List[str]=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Any=3 , UpperCamelCase: Optional[int]=4 , UpperCamelCase: List[str]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = embedding_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: str ):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: int , UpperCamelCase: Tuple , UpperCamelCase: List[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = MegatronBertModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
A__ = model(UpperCamelCase , token_type_ids=UpperCamelCase )
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: str , UpperCamelCase: List[Any] , UpperCamelCase: str , UpperCamelCase: Any , UpperCamelCase: Any ):
"""simple docstring"""
A__ = MegatronBertForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = MegatronBertForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: int , UpperCamelCase: Tuple , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: str , UpperCamelCase: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: List[Any] ):
"""simple docstring"""
A__ = MegatronBertForNextSentencePrediction(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Any , UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = MegatronBertForPreTraining(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , next_sentence_label=UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: int , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Optional[int] , UpperCamelCase: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = MegatronBertForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: Any , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = self.num_labels
A__ = MegatronBertForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: Optional[int] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = self.num_labels
A__ = MegatronBertForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: List[str] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Any , UpperCamelCase: Dict ):
"""simple docstring"""
A__ = self.num_choices
A__ = MegatronBertForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ , A__ , A__ , A__ = config_and_inputs
A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
# test_resize_embeddings = False
UpperCAmelCase = False
def UpperCamelCase ( self: str , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Dict=False ):
"""simple docstring"""
A__ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class in get_values(UpperCamelCase ):
A__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase )
A__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = MegatronBertModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*UpperCamelCase )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*UpperCamelCase )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*UpperCamelCase )
def _snake_case ( UpperCAmelCase_ : List[Any] ):
return torch.tensor(
UpperCAmelCase_ , dtype=torch.long , device=torch_device , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip("""Model is not available.""" )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
A__ = os.path.join(os.environ["""MYDIR"""] , UpperCamelCase )
A__ = MegatronBertModel.from_pretrained(UpperCamelCase )
model.to(UpperCamelCase )
model.half()
A__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
A__ = model(UpperCamelCase )[0]
A__ = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , UpperCamelCase )
A__ = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
A__ = output[0, ii, jj]
A__ = expected[3 * ii + jj]
A__ = """ii={} jj={} a={} b={}""".format(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
self.assertTrue(math.isclose(UpperCamelCase , UpperCamelCase , rel_tol=UpperCamelCase , abs_tol=UpperCamelCase ) , msg=UpperCamelCase )
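# --- Hedged usage sketch (editor addition) ---
# Running just this suite from a transformers checkout (the test path is assumed):
#   python -m pytest tests/models/megatron_bert/test_modeling_megatron_bert.py -q
# The integration test above stays skipped unless the checkpoint is available
# locally (optionally under $MYDIR).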
| 335 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ):
A__ = _distribute_shards(**UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
A__ = _split_gen_kwargs(UpperCAmelCase_ , UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ):
if expected is RuntimeError:
with pytest.raises(UpperCAmelCase_ ):
_number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
else:
A__ = _number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
assert out == expected
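# --- Hedged usage sketch (editor addition) ---
# The helpers are imported at the top of this file; calling one directly shows
# the contiguous split that the parametrized cases above assert:
#   _distribute_shards(num_shards=10, max_num_jobs=3)
#   # -> [range(0, 4), range(4, 7), range(7, 10)]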
| 335 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
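# Simplified sketch (an assumption about how a lazy module can work, not
# transformers' actual _LazyModule code): submodules are imported only when an
# attribute listed in `_import_structure` is first accessed.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, attr: str):
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)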
| 335 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE_ : str = parse(importlib.metadata.version('torch'))
def compare_versions ( UpperCAmelCase_ : Union[str, Version] , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
A__ = STR_OPERATION_TO_FUNC[operation]
if isinstance(UpperCAmelCase_ , str ):
A__ = parse(importlib.metadata.version(UpperCAmelCase_ ) )
return operation(UpperCAmelCase_ , parse(UpperCAmelCase_ ) )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return compare_versions(SCREAMING_SNAKE_CASE_ , UpperCAmelCase_ , UpperCAmelCase_ )
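# Hedged usage sketch: STR_OPERATION_TO_FUNC is assumed to map operator strings
# to functions from the `operator` module, e.g. {">=": operator.ge, "<": operator.lt}.
# The wrapper above then answers questions about the installed torch version:
# compare_versions(SCREAMING_SNAKE_CASE_, ">=", "1.12")  # True iff torch >= 1.12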
| 335 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : int = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: int = CHRF.CHAR_ORDER , UpperCamelCase: int = CHRF.WORD_ORDER , UpperCamelCase: int = CHRF.BETA , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , ):
"""simple docstring"""
A__ = len(references[0] )
if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
A__ = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
A__ = CHRF(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = sb_chrf.corpus_score(UpperCamelCase , UpperCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
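# Illustration of the reference transposition performed above: sacrebleu's CHRF
# expects one list per reference *position* rather than per prediction, so
# [[p1_ref1, p1_ref2], [p2_ref1, p2_ref2]] becomes
# [[p1_ref1, p2_ref1], [p1_ref2, p2_ref2]].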
| 335 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
def __init__( self: Any , UpperCamelCase: UNetaDModel , UpperCamelCase: KarrasVeScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCamelCase , scheduler=UpperCamelCase )
@torch.no_grad()
def __call__( self: List[str] , UpperCamelCase: int = 1 , UpperCamelCase: int = 50 , UpperCamelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase: Optional[str] = "pil" , UpperCamelCase: bool = True , **UpperCamelCase: Any , ):
"""simple docstring"""
A__ = self.unet.config.sample_size
A__ = (batch_size, 3, img_size, img_size)
A__ = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
A__ = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
A__ = self.scheduler.schedule[t]
A__ = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
A__ , A__ = self.scheduler.add_noise_to_input(UpperCamelCase , UpperCamelCase , generator=UpperCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
A__ = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
A__ = self.scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
A__ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
A__ = self.scheduler.step_correct(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
A__ = step_output.prev_sample
A__ = (sample / 2 + 0.5).clamp(0 , 1 )
A__ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A__ = self.numpy_to_pil(UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase )
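# Rough summary of the stochastic sampler loop above (simplified from Karras et
# al.; variable names are illustrative, not the scheduler's API):
#   d_hat       = (sample_hat - denoised(sample_hat)) / sigma_hat   # dx/dsigma
#   sample_prev = sample_hat + (sigma_prev - sigma_hat) * d_hat     # Euler step
#   if sigma_prev != 0: re-evaluate at sample_prev and average the two
#   derivatives (the second-order / Heun correction done in step_correct).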
| 335 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , *UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple=None , UpperCamelCase: Tuple=None , **UpperCamelCase: Dict ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
A__ = eval_examples
A__ = post_process_function
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: List[Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: Optional[int] , ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Dict=None , UpperCamelCase: str = "test" , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
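# Hypothetical usage sketch (the subclass above keeps the renamed class name `a`;
# argument names are illustrative): generation kwargs flow through evaluate()
# and predict(), falling back to args.generation_max_length / generation_num_beams.
# trainer = a(model=model, args=training_args, eval_examples=eval_examples,
#             post_process_function=post_fn, compute_metrics=metric_fn)
# metrics = trainer.evaluate(max_length=64, num_beams=4)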
| 335 | 1 |
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
SCREAMING_SNAKE_CASE_ : Dict = {
'n_samples': 6_4,
'horizon': 3_2,
'num_inference_steps': 2_0,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'hopper-medium-v2'
SCREAMING_SNAKE_CASE_ : Tuple = gym.make(env_name)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
SCREAMING_SNAKE_CASE_ : Dict = env.reset()
SCREAMING_SNAKE_CASE_ : Tuple = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : Any = 1_0_0_0
SCREAMING_SNAKE_CASE_ : str = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipeline(obs, planning_horizon=3_2)
# execute action in environment
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : int = env.step(denorm_actions)
SCREAMING_SNAKE_CASE_ : List[str] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
f""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
SCREAMING_SNAKE_CASE_ : Optional[Any] = next_observation
except KeyboardInterrupt:
pass
print(f"""Total reward: {total_reward}""")
| 335 |
"""simple docstring"""
class a :
"""simple docstring"""
def __init__( self: Dict ):
"""simple docstring"""
A__ = {}
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase , """ -> """ , """ -> """.join([str(UpperCamelCase ) for j in self.vertex[i]] ) )
def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase )
else:
# else make a new vertex
A__ = [to_vertex]
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(i , visited )
def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: list ):
"""simple docstring"""
A__ = True
print(UpperCamelCase , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(i , visited )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
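# Equivalent iterative traversal, shown as a standalone sketch (not part of the
# Graph class above); it visits vertices in the same order as the recursive DFS.
def dfs_iterative_sketch(adjacency: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbors reversed so the leftmost neighbor is explored first
        stack.extend(reversed(adjacency.get(node, [])))
    return order
# dfs_iterative_sketch({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) -> [0, 1, 2, 3]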
| 335 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER", "False" ) ) is not True, reason="Skipping test because should only be run when releasing minor transformers version", )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=UpperCamelCase , )
assert hasattr(self , """env""" )
def UpperCamelCase ( self: Tuple , UpperCamelCase: str ):
"""simple docstring"""
A__ = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
A__ = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase , instance_count=UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase , py_version="""py36""" , )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: List[Any] ):
"""simple docstring"""
TrainingJobAnalytics(UpperCamelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.create_estimator(UpperCamelCase )
# run training
estimator.fit()
# result dataframe
A__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
A__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
A__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
A__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , UpperCamelCase )
| 335 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int = 10 ):
if not isinstance(UpperCAmelCase_ , int ) or n < 0:
raise ValueError("""Invalid input""" )
A__ = 10**n
A__ = 2_8433 * (pow(2 , 783_0457 , UpperCAmelCase_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 335 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
A__ = """A painting of a squirrel eating a burger"""
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = sd_pipe.prepare_inputs(UpperCamelCase )
A__ = replicate(UpperCamelCase )
A__ = shard(UpperCamelCase )
A__ = jax.random.PRNGKey(0 )
A__ = jax.random.split(UpperCamelCase , jax.device_count() )
A__ = sd_pipe(UpperCamelCase , UpperCamelCase , UpperCamelCase , num_inference_steps=25 , jit=UpperCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
A__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A__ = images[0, 2_53:2_56, 2_53:2_56, -1]
A__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = """stabilityai/stable-diffusion-2"""
A__ , A__ = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCamelCase , subfolder="""scheduler""" )
A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained(
UpperCamelCase , scheduler=UpperCamelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
A__ = scheduler_params
A__ = """A painting of a squirrel eating a burger"""
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = sd_pipe.prepare_inputs(UpperCamelCase )
A__ = replicate(UpperCamelCase )
A__ = shard(UpperCamelCase )
A__ = jax.random.PRNGKey(0 )
A__ = jax.random.split(UpperCamelCase , jax.device_count() )
A__ = sd_pipe(UpperCamelCase , UpperCamelCase , UpperCamelCase , num_inference_steps=25 , jit=UpperCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
A__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A__ = images[0, 2_53:2_56, 2_53:2_56, -1]
A__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
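# The replicate()/shard() pairing above is the standard flax data-parallel
# recipe: pipeline params are copied to every device, prompt inputs are split
# along the leading axis, and jax.random.split provides one PRNG key per device
# so each shard samples independently.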
| 335 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(x , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
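# Usage sketch, assuming the helpers keep their original names (create_vector,
# get_3d_vectors_cross, is_zero_vector): three points are collinear exactly when
# the cross product of AB and AC is the zero vector.
# are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))  # True  -> AB x AC == (0, 0, 0)
# are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))  # False -> AB x AC == (0, 0, 1)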
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self: List[str] , UpperCamelCase: Dict , UpperCamelCase: Optional[int]=12 , UpperCamelCase: Any=7 , UpperCamelCase: List[str]=True , UpperCamelCase: str=True , UpperCamelCase: List[str]=True , UpperCamelCase: Union[str, Any]=99 , UpperCamelCase: int=32 , UpperCamelCase: List[Any]=32 , UpperCamelCase: int=2 , UpperCamelCase: Any=4 , UpperCamelCase: Any=37 , UpperCamelCase: Tuple=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: str=5_12 , UpperCamelCase: int=0.02 , UpperCamelCase: Tuple=0 , UpperCamelCase: Optional[int]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = scope
A__ = bos_token_id
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A__ = input_mask.numpy()
A__ , A__ = input_mask.shape
A__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase ):
A__ = 1
A__ = 0
A__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(UpperCamelCase )
def UpperCamelCase ( self: int ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase ( self: str , UpperCamelCase: Optional[int] , UpperCamelCase: Any , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = TFBlipTextModel(config=UpperCamelCase )
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , training=UpperCamelCase )
A__ = model(UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
pass
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
pass
@slow
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Dict=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase )
| 335 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 1 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def _snake_case ( UpperCAmelCase_ : int ):
if hor == 128:
A__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
A__ = (32, 128, 256)
A__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
A__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
A__ = (32, 64, 128, 256)
A__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
A__ = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
A__ = model.state_dict()
A__ = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
A__ = UNetaDModel(**UpperCAmelCase_ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
A__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(UpperCAmelCase_ )
hf_value_function.load_state_dict(UpperCAmelCase_ )
torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , """w""" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def _snake_case ( ):
A__ = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
A__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
A__ = model
A__ = UNetaDModel(**UpperCAmelCase_ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
A__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(UpperCAmelCase_ )
hf_value_function.load_state_dict(UpperCAmelCase_ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
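# Note on the conversion above: dict(zip(src_keys, dst_keys)) assumes both state
# dicts enumerate their parameters in the same order. A sanity-check sketch
# (names illustrative) would compare shapes before loading:
# for src_key, dst_key in mapping.items():
#     assert src_state_dict[src_key].shape == dst_state_dict[dst_key].shape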
| 335 |
"""simple docstring"""
import math
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
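# The triple loop in floyd_warshall() is the standard O(n^3) relaxation: after
# iteration k, dp[i][j] holds the shortest i -> j distance using only vertices
# 0..k as intermediates, so the final matrix answers show_min() queries in O(1).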
| 335 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Path , UpperCAmelCase_ : str = None , UpperCAmelCase_ : str = None , UpperCAmelCase_ : str = None , ):
if config_name_or_path is None:
A__ = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
if generator_tokenizer_name_or_path is None:
A__ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
A__ = question_encoder_name_or_path
A__ = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
# Save model.
A__ = RagConfig.from_pretrained(UpperCAmelCase_ )
A__ = AutoConfig.from_pretrained(UpperCAmelCase_ )
A__ = AutoConfig.from_pretrained(UpperCAmelCase_ )
A__ = gen_config
A__ = question_encoder_config
A__ = model_class.from_pretrained_question_encoder_generator(
UpperCAmelCase_ , UpperCAmelCase_ , config=UpperCAmelCase_ )
rag_model.save_pretrained(UpperCAmelCase_ )
# Sanity check.
model_class.from_pretrained(UpperCAmelCase_ )
# Save tokenizers.
A__ = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
A__ = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : str = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
SCREAMING_SNAKE_CASE_ : List[str] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
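# Example invocation (script name and paths are illustrative):
# python consolidate.py \
#   --model_type rag_sequence \
#   --generator_name_or_path facebook/bart-large-cnn \
#   --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#   --dest ./rag-sequence-consolidated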
| 335 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SCREAMING_SNAKE_CASE_ : Dict = 5_0_0_0_0_0
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.split(__file__)
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def _snake_case ( UpperCAmelCase_ : datasets.Dataset , **UpperCAmelCase_ : str ):
A__ = dataset.map(**UpperCAmelCase_ )
@get_duration
def _snake_case ( UpperCAmelCase_ : datasets.Dataset , **UpperCAmelCase_ : Any ):
A__ = dataset.filter(**UpperCAmelCase_ )
def _snake_case ( ):
A__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
A__ = generate_example_dataset(
os.path.join(UpperCAmelCase_ , """dataset.arrow""" ) , UpperCAmelCase_ , num_examples=UpperCAmelCase_ )
A__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=UpperCAmelCase_ )
def tokenize(UpperCAmelCase_ : Optional[int] ):
return tokenizer(examples["""text"""] )
A__ = map(UpperCAmelCase_ )
A__ = map(UpperCAmelCase_ , batched=UpperCAmelCase_ )
A__ = map(UpperCAmelCase_ , function=lambda UpperCAmelCase_ : None , batched=UpperCAmelCase_ )
with dataset.formatted_as(type="""numpy""" ):
A__ = map(UpperCAmelCase_ , function=lambda UpperCAmelCase_ : None , batched=UpperCAmelCase_ )
with dataset.formatted_as(type="""pandas""" ):
A__ = map(UpperCAmelCase_ , function=lambda UpperCAmelCase_ : None , batched=UpperCAmelCase_ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
A__ = map(UpperCAmelCase_ , function=lambda UpperCAmelCase_ : None , batched=UpperCAmelCase_ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
A__ = map(UpperCAmelCase_ , function=lambda UpperCAmelCase_ : None , batched=UpperCAmelCase_ )
A__ = map(UpperCAmelCase_ , function=UpperCAmelCase_ , batched=UpperCAmelCase_ )
A__ = filter(UpperCAmelCase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(UpperCAmelCase_ , """wb""" ) as f:
f.write(json.dumps(UpperCAmelCase_ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
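# Assumed shape of the @get_duration decorator imported from utils (a sketch,
# not the actual helper): it times the wrapped call and returns elapsed seconds,
# which is what the benchmark stores for each formatted map/filter variant.
# import functools, time
# def get_duration(func):
#     @functools.wraps(func)
#     def wrapper(*args, **kwargs):
#         start = time.time()
#         func(*args, **kwargs)
#         return time.time() - start
#     return wrapper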
| 335 |
"""simple docstring"""
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
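# Worked example of Malus's law as implemented above: at 60 degrees the
# transmitted intensity is I0 * cos^2(60°) = I0 / 4, so an initial intensity of
# 100.0 yields approximately 25.0.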
| 335 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
    def dummy_extractor(self ):
        """simple docstring"""

        def extract(*args , **kwargs ):
            class Out:
                def __init__(self ):
                    self.pixel_values = torch.ones([0] )

                def to(self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()
return extract
    def test_stable_diffusion_img2img_default_case(self ):
"""simple docstring"""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device )
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=init_image , )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=init_image , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_img2img_fp16(self ):
"""simple docstring"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        image = alt_pipe(
            [prompt] , generator=generator , num_inference_steps=2 , output_type="""np""" , image=init_image , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self ):
"""simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((7_60, 5_04) )
        model_id = """BAAI/AltDiffusion"""
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = """A fantasy landscape, trending on artstation"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        image_slice = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
        expected_slice = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((7_68, 5_12) )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
        model_id = """BAAI/AltDiffusion"""
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = """A fantasy landscape, trending on artstation"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="""np""" , )
        image = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 335 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE_ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
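# Kandinsky generates at resolutions that are multiples of the MoVQ scale factor; this
# helper rounds a requested height/width up to the nearest valid size.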
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
return new_h * scale_factor, new_w * scale_factor
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MultilingualCLIP , UpperCamelCase: XLMRobertaTokenizer , UpperCamelCase: UNetaDConditionModel , UpperCamelCase: Union[DDIMScheduler, DDPMScheduler] , UpperCamelCase: VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , unet=UpperCamelCase , scheduler=UpperCamelCase , movq=UpperCamelCase , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents(self , shape , dtype , device , generator , latents , scheduler ):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        # Scale the initial noise by the scheduler's expected sigma so the first denoising
        # step sees latents at the magnitude the model was trained for.
        latents = latents * scheduler.init_noise_sigma
return latents
    def _encode_prompt(self , prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt=None , ):
        """simple docstring"""
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , truncation=True , max_length=77 , return_attention_mask=True , add_special_tokens=True , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt , padding="""longest""" , return_tensors="""pt""" ).input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids , untruncated_ids ):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
        text_input_ids = text_input_ids.to(device )
        text_mask = text_inputs.attention_mask.to(device )
        prompt_embeds , text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids , attention_mask=text_mask )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt , dim=0 )
        text_mask = text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            # Encode the negative ("unconditional") prompt as well, so each denoising step
            # can contrast conditional and unconditional predictions.
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    f""" {type(prompt )}.""" )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    """ the batch size of `prompt`.""" )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens , padding="""max_length""" , max_length=77 , truncation=True , return_attention_mask=True , add_special_tokens=True , return_tensors="""pt""" , )
            uncond_text_input_ids = uncond_input.input_ids.to(device )
            uncond_text_mask = uncond_input.attention_mask.to(device )
            negative_prompt_embeds , uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids , attention_mask=uncond_text_mask )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len )
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1 , num_images_per_prompt , 1 )
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , seq_len , -1 )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            text_mask = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self , gpu_id=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(f"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
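    # Unlike the per-submodule offload above, model offload keeps each whole model on the
    # GPU only while it is in use, which is faster at a modest memory cost.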
    def enable_model_cpu_offload(self , gpu_id=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        if self.safety_checker is not None:
            _ , hook = cpu_offload_with_hook(self.safety_checker , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self ):
"""simple docstring"""
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase )
    def __call__(self , prompt: Union[str, List[str]] , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_prompt: Optional[Union[str, List[str]]] = None , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 1_00 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
"""simple docstring"""
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        # Classifier-free guidance is active whenever guidance_scale > 1.0.
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds , text_encoder_hidden_states , _ = self._encode_prompt(
            prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt )
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = get_new_h_w(height , width , self.movq_scale_factor )
# create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , device , generator , latents , self.scheduler , )
        for i , t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=text_encoder_hidden_states , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                # Standard classifier-free guidance: eps = eps_uncond + scale * (eps_text - eps_uncond).
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , ).prev_sample
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
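            # The MoVQ decoder outputs images in [-1, 1]; rescale to [0, 1] before
            # converting to numpy/PIL.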
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=image )
| 335 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # FileLock truncates over-long lock-file names so the path stays within OS limits.
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 335 | 1 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
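# chrF / chrF++ (character and word n-gram F-scores) computed via sacrebleu's CHRF scorer.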
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
    def _info(self ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(self , predictions , references , char_order: int = CHRF.CHAR_ORDER , word_order: int = CHRF.WORD_ORDER , beta: int = CHRF.BETA , lowercase: bool = False , whitespace: bool = False , eps_smoothing: bool = False , ):
        """simple docstring"""
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        # sacrebleu expects one list per reference *stream*, so transpose the
        # per-prediction reference lists before scoring.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 335 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
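# A PipelineTool wires a processor/model pair behind encode -> forward -> decode hooks;
# this tool maps an (image, question) pair to a ViLT visual-question-answering label.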
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "dandelin/vilt-b32-finetuned-vqa"
UpperCAmelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
UpperCAmelCase = "image_qa"
UpperCAmelCase = AutoProcessor
UpperCAmelCase = AutoModelForVisualQuestionAnswering
UpperCAmelCase = ["image", "text"]
UpperCAmelCase = ["text"]
    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode(self , image: "Image" , question: str ):
        """simple docstring"""
        return self.pre_processor(image , question , return_tensors="""pt""" )

    def forward(self , inputs ):
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs ).logits

    def decode(self , outputs ):
        """simple docstring"""
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
| 335 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 335 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ):
        """simple docstring"""
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self , image_inputs , batched=False ):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            # Mirror the processor's aspect-preserving resize: the shorter side becomes
            # `shortest_edge` and the longer side keeps the original aspect ratio.
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
    def setUp(self ):
        """simple docstring"""
        self.image_processor_tester = YolosImageProcessingTester(self )
@property
    def image_processor_dict(self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
    def test_image_processor_from_dict_with_kwargs(self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
        self.assertEqual(image_processor.do_pad , False )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
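        # COCO detection annotations are passed as a dict of the image id plus the raw
        # per-object annotations; the processor turns them into the `labels` targets
        # verified below.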
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
| 335 | 1 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
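# The conversion below runs three passes (captioning via blip_decoder, VQA via blip_vqa,
# image-text matching via blip_itm), each remapping the original BLIP state dict onto the
# matching Hugging Face architecture and sanity-checking the outputs.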
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73), (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path=None, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.21_10_68_74_94_27_79_54
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_ : int = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 335 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    # Run a DFS from every unvisited vertex; the root is an articulation point
    # only if it has more than one outgoing DFS tree edge.
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
| 335 | 1 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # FileLock truncates over-long lock-file names so the path stays within OS limits.
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 335 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
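# Each test below fetches an activation module by name and checks that it is zero at 0,
# saturates to 0 for large negative inputs, and is approximately the identity for large
# positive inputs.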
class a ( unittest.TestCase ):
"""simple docstring"""
    def test_swish(self ):
        """simple docstring"""
        act = get_activation("""swish""" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )

    def test_silu(self ):
        """simple docstring"""
        act = get_activation("""silu""" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )

    def test_mish(self ):
        """simple docstring"""
        act = get_activation("""mish""" )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )

    def test_gelu(self ):
        """simple docstring"""
        act = get_activation("""gelu""" )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
| 335 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
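# Deterministic kernels keep the pixel-difference assertions in these tests reproducible.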
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = KandinskyInpaintPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
    def text_embedder_hidden_size(self ):
"""simple docstring"""
return 32
@property
    def time_input_dim(self ):
"""simple docstring"""
return 32
@property
    def block_out_channels_a(self ):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim(self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim(self ):
"""simple docstring"""
return 1_00
@property
    def dummy_tokenizer(self ):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
    def dummy_text_encoder(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
return text_encoder
@property
    def dummy_unet(self ):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs(self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self ):
"""simple docstring"""
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self ):
"""simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
        components = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs(self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((2_56, 2_56) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_inpaint(self ):
        """simple docstring"""
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A__ = np.ones((7_68, 7_68) , dtype=np.floataa )
A__ = 0
A__ = """a hat"""
A__ = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
A__ = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
A__ = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
A__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ , A__ = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
A__ = pipeline(
UpperCamelCase , image=UpperCamelCase , mask_image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
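# ---------------------------------------------------------------------------
# A minimal usage sketch of the two-stage Kandinsky 2.1 inpainting flow that
# the slow test above exercises. The checkpoint names, image URL, and the
# to_tuple() handoff all come from the test itself; the mask convention
# (zeros marking the region to repaint) is an assumption inferred from it.
# ---------------------------------------------------------------------------
import numpy as np
import torch
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipeline = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0  # assumed: zeros mark the area to repaint

# Stage 1: the prior maps the text prompt to CLIP image embeddings.
image_embeds, negative_image_embeds = pipe_prior(
    "a hat", generator=torch.Generator("cpu").manual_seed(0), negative_prompt=""
).to_tuple()

# Stage 2: the inpainting UNet repaints the masked region, conditioned on them.
image = pipeline(
    "a hat",
    image=init_image,
    mask_image=mask,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    height=768,
    width=768,
    num_inference_steps=100,
    output_type="np",
).images[0]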
| 335 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Union[str, Any] , UpperCamelCase: Any ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = RobertaEmbeddings(UpperCamelCase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = config.num_hidden_layers
A__ = DeeRobertaModel(UpperCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: str=None , UpperCamelCase: str=None , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , UpperCamelCase: List[Any]=None , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[int]=-1 , UpperCamelCase: Optional[Any]=False , ):
"""simple docstring"""
A__ = self.num_layers
try:
A__ = self.roberta(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , position_ids=UpperCamelCase , head_mask=UpperCamelCase , inputs_embeds=UpperCamelCase , )
A__ = outputs[1]
A__ = self.dropout(UpperCamelCase )
A__ = self.classifier(UpperCamelCase )
A__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A__ = e.message
A__ = e.exit_layer
A__ = outputs[0]
if not self.training:
A__ = entropy(UpperCamelCase )
A__ = []
A__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
A__ = []
for highway_exit in outputs[-1]:
A__ = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase )
if train_highway:
A__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A__ = (loss,) + outputs
if not self.training:
A__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
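# ---------------------------------------------------------------------------
# A minimal sketch of the entropy-based early-exit rule that DeeRoBERTa's
# highway classifiers rely on at inference time. `entropy` mirrors the helper
# imported from modeling_highway_bert above; the 0.5 threshold is an
# illustrative assumption, not a value taken from the paper or this code.
# ---------------------------------------------------------------------------
import torch

def entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution, per example.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)

def maybe_exit(highway_logits: torch.Tensor, threshold: float = 0.5) -> bool:
    # Exit at this layer if the highway classifier is already confident enough.
    return entropy(highway_logits).mean().item() < threshold

logits_early = torch.tensor([[4.0, -2.0]])  # confident -> low entropy
logits_late = torch.tensor([[0.1, 0.0]])    # uncertain -> high entropy
print(maybe_exit(logits_early), maybe_exit(logits_late))  # True False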
| 335 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _snake_case ( UpperCAmelCase_ : Union[str, Any] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def _snake_case ( UpperCAmelCase_ : str ):
# word like '180' (digits), '身高' ("height"), or '神' ("spirit"); returns 1 only if every character is a CJK character
for char in word:
A__ = ord(UpperCAmelCase_ )
if not _is_chinese_char(UpperCAmelCase_ ):
return 0
return 1
def _snake_case ( UpperCAmelCase_ : List[str] ):
A__ = set()
for token in tokens:
A__ = len(UpperCAmelCase_ ) > 1 and is_chinese(UpperCAmelCase_ )
if chinese_word:
word_set.add(UpperCAmelCase_ )
A__ = list(UpperCAmelCase_ )
return word_list
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : set() ):
if not chinese_word_set:
return bert_tokens
A__ = max([len(UpperCAmelCase_ ) for w in chinese_word_set] )
A__ = bert_tokens
A__ , A__ = 0, len(UpperCAmelCase_ )
while start < end:
A__ = True
if is_chinese(bert_word[start] ):
A__ = min(end - start , UpperCAmelCase_ )
for i in range(UpperCAmelCase_ , 1 , -1 ):
A__ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
A__ = """##""" + bert_word[j]
A__ = start + i
A__ = False
break
if single_word:
start += 1
return bert_word
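# ---------------------------------------------------------------------------
# A worked example of the "##" marking that `add_sub_symbol` performs, as a
# self-contained restatement with readable names (it omits the original's
# is_chinese guard for brevity; inputs are made up). Characters that LTP
# grouped into one word keep the first character bare and prefix the rest
# with "##", so whole-word masking can later mask the word as a unit.
# ---------------------------------------------------------------------------
def mark_subwords(bert_tokens: list, chinese_words: set) -> list:
    max_len = max((len(w) for w in chinese_words), default=0)
    out, start = list(bert_tokens), 0
    while start < len(out):
        matched = False
        for i in range(min(len(out) - start, max_len), 1, -1):
            if "".join(out[start : start + i]) in chinese_words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return out

print(mark_subwords(["我", "喜", "欢", "北", "京"], {"喜欢", "北京"}))
# ['我', '喜', '##欢', '北', '##京']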
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : LTP , UpperCAmelCase_ : BertTokenizer ):
A__ = []
for i in range(0 , len(UpperCAmelCase_ ) , 100 ):
A__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
A__ = [get_chinese_word(UpperCAmelCase_ ) for r in res]
ltp_res.extend(UpperCAmelCase_ )
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
A__ = []
for i in range(0 , len(UpperCAmelCase_ ) , 100 ):
A__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
A__ = []
for input_ids, chinese_word in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = []
for id in input_ids:
A__ = bert_tokenizer._convert_id_to_token(UpperCAmelCase_ )
input_tokens.append(UpperCAmelCase_ )
A__ = add_sub_symbol(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = []
# We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(UpperCAmelCase_ ):
if token[:2] == "##":
A__ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase_ ) == 1 and _is_chinese_char(ord(UpperCAmelCase_ ) ):
ref_id.append(UpperCAmelCase_ )
ref_ids.append(UpperCAmelCase_ )
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
return ref_ids
def _snake_case ( UpperCAmelCase_ : str ):
# For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
# To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp).
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
A__ = f.readlines()
A__ = [line.strip() for line in data if len(UpperCAmelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A__ = LTP(args.ltp ) # faster on a GPU device
A__ = BertTokenizer.from_pretrained(args.bert )
A__ = prepare_ref(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
A__ = [json.dumps(UpperCAmelCase_ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[str] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
main(args)
| 335 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
SCREAMING_SNAKE_CASE_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _snake_case ( ):
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _snake_case ( ):
A__ = """rougeLsum"""
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _snake_case ( ):
A__ = ["""rouge1""", """rouge2""", """rougeL"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
assert score_sep == score_no_sep
def _snake_case ( ):
A__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ )
def _snake_case ( ):
A__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase_ )["""rougeLsum"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _snake_case ( ):
A__ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A__ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
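# ---------------------------------------------------------------------------
# A standalone sketch of the scoring these tests exercise, using the
# rouge_score package that the local `calculate_rouge` helper wraps. The
# sentences are made up; note that rougeLsum splits on newlines, which is
# why the newline_sep behaviour tested above changes the score.
# ---------------------------------------------------------------------------
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge2", "rougeLsum"], use_stemmer=True)
target = "Margot Frank died in 1945.\nShe was Anne's older sister."
prediction = "Margot Frank, Anne's older sister, died in 1945."
scores = scorer.score(target, prediction)
print(scores["rouge2"].fmeasure, scores["rougeLsum"].fmeasure)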
| 335 | 1 |
"""simple docstring"""
from collections import deque
def _snake_case ( UpperCAmelCase_ : str ):
A__ = len(UpperCAmelCase_ )
A__ = deque()
A__ = [False for _ in range(UpperCAmelCase_ )]
A__ = [-1 for _ in range(UpperCAmelCase_ )]
A__ = index_of[:]
def strong_connect(UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ):
A__ = index # the number when this node is seen
A__ = index # lowest rank node reachable from here
index += 1
stack.append(UpperCAmelCase_ )
A__ = True
for w in g[v]:
if index_of[w] == -1:
A__ = strong_connect(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
A__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
A__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
A__ = []
A__ = stack.pop()
A__ = False
component.append(UpperCAmelCase_ )
while w != v:
A__ = stack.pop()
A__ = False
component.append(UpperCAmelCase_ )
components.append(UpperCAmelCase_ )
return index
A__ = []
for v in range(UpperCAmelCase_ ):
if index_of[v] == -1:
strong_connect(UpperCAmelCase_ , 0 , UpperCAmelCase_ )
return components
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str ):
A__ = [[] for _ in range(UpperCAmelCase_ )]
for u, v in edges:
g[u].append(UpperCAmelCase_ )
return g
if __name__ == "__main__":
# Test
SCREAMING_SNAKE_CASE_ : int = 7
SCREAMING_SNAKE_CASE_ : List[str] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
SCREAMING_SNAKE_CASE_ : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [(u, v) for u, v in zip(source, target)]
SCREAMING_SNAKE_CASE_ : Any = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
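# ---------------------------------------------------------------------------
# The same recursive Tarjan algorithm as above, rewritten with descriptive
# names as a self-contained reading aid. It mirrors the logic the assertion
# exercises (including restarting the index at 0 per DFS tree, which is safe
# because lowlinks are only compared within one tree); it is a sketch, not a
# drop-in replacement for the functions in this file.
# ---------------------------------------------------------------------------
def tarjan_scc(graph: list) -> list:
    n = len(graph)
    index_of = [-1] * n    # discovery index of each vertex (-1 = unseen)
    lowlink_of = [-1] * n  # smallest index reachable from the vertex's subtree
    on_stack = [False] * n
    stack, components = [], []

    def strong_connect(v: int, index: int) -> int:
        index_of[v] = lowlink_of[v] = index
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in graph[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index)
                lowlink_of[v] = min(lowlink_of[v], lowlink_of[w])
            elif on_stack[w]:
                lowlink_of[v] = min(lowlink_of[v], lowlink_of[w])
        if lowlink_of[v] == index_of[v]:  # v is the root of an SCC
            component = []
            while True:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
                if w == v:
                    break
            components.append(component)
        return index

    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0)
    return components

print(tarjan_scc([[1], [2], [0], []]))  # [[2, 1, 0], [3]]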
| 335 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None ):
A__ = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = model.mobilenet_va
else:
A__ = model
A__ = """MobilenetV1/Conv2d_0/"""
A__ = backbone.conv_stem.convolution.weight
A__ = backbone.conv_stem.normalization.bias
A__ = backbone.conv_stem.normalization.weight
A__ = backbone.conv_stem.normalization.running_mean
A__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A__ = i + 1
A__ = i * 2
A__ = backbone.layer[pt_index]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
A__ = backbone.layer[pt_index + 1]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
A__ = model.classifier.weight
A__ = model.classifier.bias
return tf_to_pt_map
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
# Build TF to PyTorch weights loading map
A__ = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
A__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
A__ = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
A__ = array.squeeze().transpose()
else:
A__ = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
A__ = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp""" , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp_1""" , UpperCAmelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , UpperCAmelCase_ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def _snake_case ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : nn.Convad ):
A__ , A__ = features.shape[-2:]
A__ , A__ = conv_layer.stride
A__ , A__ = conv_layer.kernel_size
if in_height % stride_height == 0:
A__ = max(kernel_height - stride_height , 0 )
else:
A__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
A__ = max(kernel_width - stride_width , 0 )
else:
A__ = max(kernel_width - (in_width % stride_width) , 0 )
A__ = pad_along_width // 2
A__ = pad_along_width - pad_left
A__ = pad_along_height // 2
A__ = pad_along_height - pad_top
A__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , """constant""" , 0.0 )
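# ---------------------------------------------------------------------------
# A small numeric check of the TensorFlow-style "SAME" padding computed by
# the function above: when the required padding is odd, the extra pixel goes
# to the bottom/right, which is why MobileNetV1 cannot simply use symmetric
# PyTorch padding. The shapes below are illustrative.
# ---------------------------------------------------------------------------
import torch
from torch import nn

conv = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=0)
features = torch.randn(1, 3, 7, 7)

# in=7, stride=2, kernel=3 -> pad_along = max(3 - (7 % 2), 0) = 2, split 1/1.
# With in=8 it would be pad_along = 1, split 0/1 (the asymmetric case).
padded = nn.functional.pad(features, (1, 1, 1, 1), "constant", 0.0)
print(conv(padded).shape)  # torch.Size([1, 8, 4, 4]), matching TF "SAME"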
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
A__ = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode="""zeros""" , )
if use_normalization:
A__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
A__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
A__ = ACTaFN[config.hidden_act]
else:
A__ = config.hidden_act
else:
A__ = None
def UpperCamelCase ( self: List[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
A__ = apply_tf_padding(UpperCamelCase , self.convolution )
A__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
A__ = self.normalization(UpperCamelCase )
if self.activation is not None:
A__ = self.activation(UpperCamelCase )
return features
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = MobileNetVaConfig
UpperCAmelCase = load_tf_weights_in_mobilenet_va
UpperCAmelCase = "mobilenet_v1"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = False
def UpperCamelCase ( self: Any , UpperCamelCase: Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
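# ---------------------------------------------------------------------------
# A minimal inference sketch for the classification head defined above. The
# checkpoint name and expected label come from the docstring constants in
# this file; the image URL is an assumption for illustration.
# ---------------------------------------------------------------------------
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
predicted = logits.argmax(-1).item()
print(model.config.id2label[predicted])  # e.g. "tabby, tabby cat"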
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: str , ):
"""simple docstring"""
A__ = parent
A__ = 13
A__ = 7
A__ = True
A__ = True
A__ = True
A__ = 99
A__ = 32
A__ = 2
A__ = 4
A__ = 37
A__ = """gelu"""
A__ = 0.1
A__ = 0.1
A__ = 5_12
A__ = 16
A__ = 2
A__ = 0.02
A__ = 3
A__ = 4
A__ = None
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ , A__ , A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
A__ = True
A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[int] , UpperCamelCase: str ):
"""simple docstring"""
A__ = TFEsmModel(config=UpperCamelCase )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(UpperCamelCase )
A__ = [input_ids, input_mask]
A__ = model(UpperCamelCase )
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: int , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: Optional[Any] , ):
"""simple docstring"""
A__ = True
A__ = TFEsmModel(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
A__ = model(UpperCamelCase )
A__ = [input_ids, input_mask]
A__ = model(UpperCamelCase , encoder_hidden_states=UpperCamelCase )
# Also check the case where encoder outputs are not passed
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: List[Any] , UpperCamelCase: List[Any] , UpperCamelCase: str ):
"""simple docstring"""
A__ = TFEsmForMaskedLM(config=UpperCamelCase )
A__ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Dict , UpperCamelCase: List[Any] ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFEsmForTokenClassification(config=UpperCamelCase )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ , A__ , A__ = config_and_inputs
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = TFEsmModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFEsmModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
pass
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A__ = model.get_bias()
assert isinstance(UpperCamelCase , UpperCamelCase )
for k, v in name.items():
assert isinstance(UpperCamelCase , tf.Variable )
else:
A__ = model.get_output_embeddings()
assert x is None
A__ = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(UpperCamelCase )[0]
A__ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , UpperCamelCase )
# compare the actual values for a slice.
A__ = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
A__ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A__ = model(UpperCamelCase )[0]
# compare the actual values for a slice.
A__ = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
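# ---------------------------------------------------------------------------
# A minimal masked-language-model sketch with the same small ESM-2 checkpoint
# the integration tests above use. The protein sequence is illustrative.
# ---------------------------------------------------------------------------
import tensorflow as tf
from transformers import AutoTokenizer, TFEsmForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

inputs = tokenizer("MKTVRQ<mask>RLKSIV", return_tensors="tf")
logits = model(**inputs).logits
mask_pos = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0][0]
predicted_id = int(tf.argmax(logits[0, mask_pos]))
print(tokenizer.decode([predicted_id]))  # the residue predicted at the mask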
| 335 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : set ):
A__ , A__ = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
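# ---------------------------------------------------------------------------
# A readable, self-contained restatement of the counter above, plus a quick
# check on a 3x3 grid with a blocked centre (1 marks an obstacle): exactly
# two simple paths reach the opposite corner. Inputs are made up.
# ---------------------------------------------------------------------------
def count_paths(grid: list, row: int = 0, col: int = 0, visit: set = None) -> int:
    visit = set() if visit is None else visit
    rows, cols = len(grid), len(grid[0])
    if (min(row, col) < 0 or row == rows or col == cols
            or (row, col) in visit or grid[row][col] == 1):
        return 0
    if row == rows - 1 and col == cols - 1:
        return 1
    visit.add((row, col))
    count = sum(
        count_paths(grid, row + dr, col + dc, visit)
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1))
    )
    visit.remove((row, col))
    return count

print(count_paths([[0, 0, 0], [0, 1, 0], [0, 0, 0]]))  # 2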
| 335 | 1 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , **UpperCamelCase: int ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] , **UpperCamelCase: str ):
"""simple docstring"""
A__ = {}
A__ = {}
A__ = {}
# preprocess args
if "points_per_batch" in kwargs:
A__ = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A__ = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A__ = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A__ = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A__ = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A__ = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A__ = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A__ = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A__ = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A__ = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A__ = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A__ = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self: Union[str, Any] , UpperCamelCase: Union[str, Any] , *UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any]=None , UpperCamelCase: str=None , **UpperCamelCase: Dict ):
"""simple docstring"""
return super().__call__(UpperCamelCase , *UpperCamelCase , num_workers=UpperCamelCase , batch_size=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: Tuple , UpperCamelCase: Any=64 , UpperCamelCase: int = 0 , UpperCamelCase: float = 5_12 / 15_00 , UpperCamelCase: Optional[int] = 32 , UpperCamelCase: Optional[int] = 1 , ):
"""simple docstring"""
A__ = load_image(UpperCamelCase )
A__ = self.image_processor.size["""longest_edge"""]
A__ , A__ , A__ , A__ = self.image_processor.generate_crop_boxes(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.image_processor(images=UpperCamelCase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A__ = self.get_inference_context()
with inference_context():
A__ = self._ensure_tensor_on_device(UpperCamelCase , device=self.device )
A__ = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A__ = image_embeddings
A__ = grid_points.shape[1]
A__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCamelCase , UpperCamelCase ):
A__ = grid_points[:, i : i + points_per_batch, :, :]
A__ = input_labels[:, i : i + points_per_batch]
A__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: str=0.88 , UpperCamelCase: str=0.95 , UpperCamelCase: Optional[int]=0 , UpperCamelCase: List[str]=1 , ):
"""simple docstring"""
A__ = model_inputs.pop("""input_boxes""" )
A__ = model_inputs.pop("""is_last""" )
A__ = model_inputs.pop("""original_sizes""" ).tolist()
A__ = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A__ = self.model(**UpperCamelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A__ = model_outputs["""pred_masks"""]
A__ = self.image_processor.post_process_masks(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , binarize=UpperCamelCase )
A__ = model_outputs["""iou_scores"""]
A__ , A__ , A__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase ( self: str , UpperCamelCase: List[Any] , UpperCamelCase: Union[str, Any]=False , UpperCamelCase: Optional[Any]=False , UpperCamelCase: Optional[Any]=0.7 , ):
"""simple docstring"""
A__ = []
A__ = []
A__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A__ = torch.cat(UpperCamelCase )
A__ = torch.cat(UpperCamelCase )
A__ , A__ , A__ , A__ = self.image_processor.post_process_for_mask_generation(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = defaultdict(UpperCamelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCamelCase )
A__ = {}
if output_rle_mask:
A__ = rle_mask
if output_bboxes_mask:
A__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 335 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : int = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = ["pixel_values"]
def __init__( self: int , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: bool = True , **UpperCamelCase: Optional[Any] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
A__ = size if size is not None else {"""height""": 3_84, """width""": 3_84}
A__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A__ = image_std if image_std is not None else OPENAI_CLIP_STD
A__ = do_convert_rgb
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Tuple , ):
"""simple docstring"""
A__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
A__ = (size["""height"""], size["""width"""])
return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[int, float] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: int , ):
"""simple docstring"""
return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Dict , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: int , ):
"""simple docstring"""
return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: bool = None , UpperCamelCase: ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase: List[Any] , ):
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = size if size is not None else self.size
A__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
A__ = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
A__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_rescale:
A__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images]
A__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
A__ = BatchFeature(data={"""pixel_values""": images} , tensor_type=UpperCamelCase )
return encoded_outputs
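# ---------------------------------------------------------------------------
# A short sketch of how the processor above is typically driven. The class
# and checkpoint names (BlipImageProcessor, Salesforce/blip-image-captioning
# -base) are assumptions based on the 384x384 default and the CLIP image
# statistics used in this file; the image URL is illustrative.
# ---------------------------------------------------------------------------
import requests
from PIL import Image
from transformers import BlipImageProcessor

processor = BlipImageProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 384, 384])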
| 335 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ):
A__ = _distribute_shards(**UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
A__ = _split_gen_kwargs(UpperCAmelCase_ , UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ):
if expected is RuntimeError:
with pytest.raises(UpperCAmelCase_ ):
_number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
else:
A__ = _number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
assert out == expected
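# ---------------------------------------------------------------------------
# A self-contained sketch of the contiguous split the first parametrization
# above expects from `_distribute_shards`: shards are dealt out to at most
# `max_num_jobs` jobs, with the remainder spread over the first jobs. The
# real implementation in datasets.utils.sharding may differ in details.
# ---------------------------------------------------------------------------
def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    num_jobs = min(num_shards, max_num_jobs)
    shards_per_job, remainder = divmod(num_shards, num_jobs) if num_jobs else (0, 0)
    out, start = [], 0
    for job in range(num_jobs):
        size = shards_per_job + (1 if job < remainder else 0)
        out.append(range(start, start + size))
        start += size
    return out

print(distribute_shards(10, 3))  # [range(0, 4), range(4, 7), range(7, 10)]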
| 335 | 1 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list ):
A__ = 0
while len(UpperCAmelCase_ ) > 1:
A__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
A__ = files.index(min(UpperCAmelCase_ ) )
temp += files[min_index]
files.pop(UpperCAmelCase_ )
files.append(UpperCAmelCase_ )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
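# ---------------------------------------------------------------------------
# The list scan above re-finds the minimum twice per merge, which is O(n^2);
# the same greedy merge is usually done with a min-heap in O(n log n). A
# sketch of that equivalent formulation:
# ---------------------------------------------------------------------------
import heapq

def optimal_merge_pattern(files: list) -> float:
    heapq.heapify(files)
    total = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        total += merged
        heapq.heappush(files, merged)
    return total

print(optimal_merge_pattern([2, 3, 4]))  # 14  (2+3=5, then 5+4=9; 5+9=14)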
| 335 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE_ : str = parse(importlib.metadata.version('torch'))
def _snake_case ( UpperCAmelCase_ : Union[str, Version] , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
A__ = STR_OPERATION_TO_FUNC[operation]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = parse(importlib.metadata.version(UpperCAmelCase_ ) )
return operation(UpperCAmelCase_ , parse(UpperCAmelCase_ ) )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return compare_versions(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
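# Minimal usage sketch (these helpers are re-exported as compare_versions /
# is_torch_version; the ">=" key is assumed to exist in STR_OPERATION_TO_FUNC):
#   is_torch_version(">=", "1.12.0")  # True when the installed torch is >= 1.12.0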
| 335 | 1 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
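# Paired model summaries and reference summaries used as fixtures by the ROUGE tests below.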
SCREAMING_SNAKE_CASE_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
SCREAMING_SNAKE_CASE_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _snake_case ( ):
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _snake_case ( ):
A__ = """rougeLsum"""
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _snake_case ( ):
A__ = ["""rouge1""", """rouge2""", """rougeL"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
assert score_sep == score_no_sep
def _snake_case ( ):
A__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ )
def _snake_case ( ):
A__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase_ )["""rougeLsum"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _snake_case ( ):
A__ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A__ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
| 335 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: int = CHRF.CHAR_ORDER , UpperCamelCase: int = CHRF.WORD_ORDER , UpperCamelCase: int = CHRF.BETA , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , ):
"""simple docstring"""
A__ = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
A__ = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
A__ = CHRF(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = sb_chrf.corpus_score(UpperCamelCase , UpperCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 335 | 1 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = inspect.getfile(accelerate.test_utils )
A__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
A__ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = f"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
A__ = [sys.executable] + distributed_args
execute_subprocess_async(UpperCamelCase , env=os.environ.copy() )
| 335 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , *UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple=None , UpperCamelCase: Tuple=None , **UpperCamelCase: Dict ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
A__ = eval_examples
A__ = post_process_function
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: List[Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: Optional[int] , ):
"""simple docstring"""
A__ = gen_kwargs.copy()
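        # Resolve generation kwargs, falling back to the values configured in the training arguments.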
A__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Dict=None , UpperCamelCase: str = "test" , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
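# Project Euler 33: non-trivial two-digit fractions whose value survives
# "cancelling" a shared digit, e.g. 49/98 == 4/8.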
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _snake_case ( UpperCAmelCase_ : int ):
A__ = []
A__ = 11
A__ = int("""1""" + """0""" * digit_len )
for num in range(UpperCAmelCase_ , UpperCAmelCase_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(UpperCAmelCase_ , UpperCAmelCase_ ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
A__ = 10
return solutions
def _snake_case ( UpperCAmelCase_ : int = 2 ):
A__ = 1.0
for fraction in fraction_list(UpperCAmelCase_ ):
A__ = Fraction(UpperCAmelCase_ )
result *= frac.denominator / frac.numerator
return int(UpperCAmelCase_ )
if __name__ == "__main__":
print(solution())
| 335 |
"""simple docstring"""
class a :
"""simple docstring"""
def __init__( self: Dict ):
"""simple docstring"""
A__ = {}
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase , """ -> """ , """ -> """.join([str(UpperCamelCase ) for j in self.vertex[i]] ) )
def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase )
else:
# else make a new vertex
A__ = [to_vertex]
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: list ):
"""simple docstring"""
A__ = True
print(UpperCamelCase , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 335 | 1 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int ):
A__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def _snake_case ( UpperCAmelCase_ : int ):
A__ = 0
while number > 0:
A__ = number % 10
sum_of_digits += last_digit
A__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _snake_case ( UpperCAmelCase_ : int = 100 ):
A__ = factorial(UpperCAmelCase_ )
A__ = split_and_add(UpperCAmelCase_ )
return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 335 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int = 10 ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or n < 0:
raise ValueError("""Invalid input""" )
A__ = 10**n
A__ = 2_8433 * (pow(2 , 783_0457 , UpperCAmelCase_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 335 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Tuple , *UpperCamelCase: List[str] , **UpperCamelCase: Optional[Any] ):
"""simple docstring"""
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 335 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
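# Three points lie on one line exactly when the two vectors they span have a
# (numerically) zero cross product.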
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
| 335 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ : Tuple = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , *UpperCamelCase: Optional[int] , **UpperCamelCase: str ):
"""simple docstring"""
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 335 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 1 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a :
"""simple docstring"""
def __init__( self: int ):
"""simple docstring"""
A__ = """"""
A__ = """"""
A__ = []
A__ = 0
A__ = 2_56
A__ = 0
A__ = 0
A__ = 0
A__ = 0
def UpperCamelCase ( self: List[Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = cva.imread(UpperCamelCase , 0 )
A__ = copy.deepcopy(self.img )
A__ , A__ , A__ = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label="""x""" )
A__ = np.sum(UpperCamelCase )
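        # Accumulate the normalised histogram into a cumulative distribution and
        # map each grey level through it (histogram equalisation).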
for i in range(len(UpperCamelCase ) ):
A__ = x[i] / self.k
self.sk += prk
A__ = (self.L - 1) * self.sk
if self.rem != 0:
                A__ = last % 1  # fractional part of the mapped level, used for rounding below
A__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(UpperCamelCase )
A__ = int(np.ma.count(self.img ) / self.img[1].size )
A__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
A__ = self.img[j][i]
if num != self.last_list[num]:
A__ = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : str = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
SCREAMING_SNAKE_CASE_ : str = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 335 |
"""simple docstring"""
import math
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 335 | 1 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
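# DeepSpeed wrappers are only re-exported when the optional dependency is installed.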
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 335 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 1 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
SCREAMING_SNAKE_CASE_ : Tuple = logging.get_logger(__name__)
def _snake_case ( UpperCAmelCase_ : List[str] ):
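    # Rewrite "name.0"-style PyTorch keys to Flax's "name_0" naming convention.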
A__ = R"""\w+[.]\d+"""
A__ = re.findall(UpperCAmelCase_ , UpperCAmelCase_ )
for pat in pats:
A__ = key.replace(UpperCAmelCase_ , """_""".join(pat.split(""".""" ) ) )
return key
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
A__ = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A__ = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
A__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A__ = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A__ = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]=42 ):
# Step 1: Convert pytorch tensor to numpy
A__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A__ = flax_model.init_weights(PRNGKey(UpperCAmelCase_ ) )
A__ = flatten_dict(UpperCAmelCase_ )
A__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A__ = rename_key(UpperCAmelCase_ )
A__ = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
A__ , A__ = rename_key_and_reshape_tensor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
A__ = jnp.asarray(UpperCAmelCase_ )
return unflatten_dict(UpperCAmelCase_ )
| 335 |
"""simple docstring"""
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2)
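# Worked example: light of intensity 100.0 hitting a polariser at 60 degrees
# transmits 100.0 * cos(60°) ** 2 = 100.0 * 0.25 = 25.0.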
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 335 | 1 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '0.18.2'
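# Optional backends (torch, flax, onnx, transformers, ...) are probed below; when
# one is missing, placeholder "dummy" objects are exported in its place.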
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 335 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
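# A de-obfuscated usage sketch of the pipeline exercised by the slow tests
# above (hedged: `AltDiffusionImgaImgPipeline` is this file's mangled spelling
# of diffusers' public `AltDiffusionImg2ImgPipeline`; running this needs the
# BAAI/AltDiffusion weights and a CUDA device).
import torch
from diffusers import AltDiffusionImg2ImgPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))
pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
pipe.to("cuda")
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    generator=torch.manual_seed(0),
    output_type="np",
).images[0]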
| 335 | 1 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa :
"""simple docstring"""
def __init__( self: Dict , UpperCamelCase: bytes ):
"""simple docstring"""
A__ = data
# Initialize hash values
A__ = [
0X6a09e667,
0Xbb67ae85,
0X3c6ef372,
0Xa54ff53a,
0X510e527f,
0X9b05688c,
0X1f83d9ab,
0X5be0cd19,
]
# Initialize round constants
A__ = [
0X428a2f98,
0X71374491,
0Xb5c0fbcf,
0Xe9b5dba5,
0X3956c25b,
0X59f111f1,
0X923f82a4,
0Xab1c5ed5,
0Xd807aa98,
0X12835b01,
0X243185be,
0X550c7dc3,
0X72be5d74,
0X80deb1fe,
0X9bdc06a7,
0Xc19bf174,
0Xe49b69c1,
0Xefbe4786,
0X0fc19dc6,
0X240ca1cc,
0X2de92c6f,
0X4a7484aa,
0X5cb0a9dc,
0X76f988da,
0X983e5152,
0Xa831c66d,
0Xb00327c8,
0Xbf597fc7,
0Xc6e00bf3,
0Xd5a79147,
0X06ca6351,
0X14292967,
0X27b70a85,
0X2e1b2138,
0X4d2c6dfc,
0X53380d13,
0X650a7354,
0X766a0abb,
0X81c2c92e,
0X92722c85,
0Xa2bfe8a1,
0Xa81a664b,
0Xc24b8b70,
0Xc76c51a3,
0Xd192e819,
0Xd6990624,
0Xf40e3585,
0X106aa070,
0X19a4c116,
0X1e376c08,
0X2748774c,
0X34b0bcb5,
0X391c0cb3,
0X4ed8aa4a,
0X5b9cca4f,
0X682e6ff3,
0X748f82ee,
0X78a5636f,
0X84c87814,
0X8cc70208,
0X90befffa,
0Xa4506ceb,
0Xbef9a3f7,
0Xc67178f2,
]
A__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCamelCase ( UpperCamelCase: bytes ):
"""simple docstring"""
A__ = B"""\x80""" + (B"""\x00""" * (63 - (len(UpperCamelCase ) + 8) % 64))
A__ = struct.pack(""">Q""" , (len(UpperCamelCase ) * 8) )
return data + padding + big_endian_integer
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A__ = list(struct.unpack(""">16L""" , UpperCamelCase ) )
# add 48 0-ed integers
words += [0] * 48
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
A__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
A__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100000000
# Compression
A__ = self.ror(UpperCamelCase , 6 ) ^ self.ror(UpperCamelCase , 11 ) ^ self.ror(UpperCamelCase , 25 )
A__ = (e & f) ^ ((~e & 0Xffffffff) & g)
A__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100000000
A__ = self.ror(UpperCamelCase , 2 ) ^ self.ror(UpperCamelCase , 13 ) ^ self.ror(UpperCamelCase , 22 )
A__ = (a & b) ^ (a & c) ^ (b & c)
A__ = (sa + maj) % 0X100000000
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = (
g,
f,
e,
((d + tempa) % 0X100000000),
c,
b,
a,
((tempa + tempa) % 0X100000000),
)
A__ = [a, b, c, d, e, f, g, h]
# Modify final values
A__ = [
((element + mutated_hash_values[index]) % 0X100000000)
for index, element in enumerate(self.hashes )
]
A__ = """""".join([hex(UpperCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
return 0Xffffffff & (value << (32 - rotations)) | (value >> rotations)
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
import hashlib
A__ = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHAaaa(UpperCamelCase ).hash , hashlib.sha256(UpperCamelCase ).hexdigest() )
def main():
import doctest
doctest.testmod()
A__ = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
A__ = parser.parse_args()
A__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
A__ = f.read()
else:
A__ = bytes(UpperCAmelCase_ , """utf-8""" )
print(SHAaaa(UpperCAmelCase_ ).hash )
if __name__ == "__main__":
main()
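# Hedged sanity check for the class above: assuming the preprocessing,
# 64-round compression and hex formatting are intact, the digest must match
# hashlib. (`SHAaaa` is this file's mangled spelling of SHA256.)
if __name__ == "__main__":
    import hashlib

    message = b"Test String"
    assert SHAaaa(message).hash == hashlib.sha256(message).hexdigest()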
| 335 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _snake_case ( tmpdir ):
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
A__ = time.time()
locka.acquire(UpperCAmelCase_ )
assert time.time() - _start > timeout
def _snake_case ( tmpdir ):
A__ = """a""" * 1000 + """.lock"""
A__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
locka.acquire(0 )
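# A hedged sketch of the behaviour the tests above pin down: a second
# FileLock on the same path blocks, and acquire(timeout) raises Timeout once
# the budget runs out. The lock path is illustrative.
import os
import tempfile

from datasets.utils.filelock import FileLock, Timeout

lock_path = os.path.join(tempfile.mkdtemp(), "demo.lock")
lock_a = FileLock(lock_path)
lock_b = FileLock(lock_path)
with lock_a.acquire():
    try:
        lock_b.acquire(timeout=0.01)
    except Timeout:
        print("lock is already held, as expected")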
| 335 | 1 |
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ : List[str] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = GPTSwaTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = False
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A__ = GPTSwaTokenizer(UpperCamelCase , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Any , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = """This is a test"""
A__ = """This is a test"""
return input_text, output_text
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = """<s>"""
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(UpperCamelCase ) , 20_00 )
def UpperCamelCase ( self: str ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = GPTSwaTokenizer(UpperCamelCase )
A__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
A__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
UpperCamelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
A__ = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
A__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
# fmt: off
self.assertListEqual(
UpperCamelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = GPTSwaTokenizer(UpperCamelCase )
A__ = ["""This is a test""", """I was born in 92000, and this is falsé."""]
A__ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(UpperCamelCase , UpperCamelCase ):
self.assertListEqual(tokenizer.encode_fast(UpperCamelCase ) , UpperCamelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(UpperCamelCase , UpperCamelCase ):
self.assertEqual(tokenizer.decode_fast(UpperCamelCase ) , UpperCamelCase )
@slow
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
A__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=UpperCamelCase , )
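# A hedged usage sketch of the tokenizer under test, using the checkpoint id
# from the integration test above. (The import in this file uses a mangled
# spelling; the class is published as `GPTSw3Tokenizer`.)
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Det är inget fel på Mr. Cool")["input_ids"]
print(ids)
print(tokenizer.decode(ids))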
| 335 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "dandelin/vilt-b32-finetuned-vqa"
UpperCAmelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
UpperCAmelCase = "image_qa"
UpperCAmelCase = AutoProcessor
UpperCAmelCase = AutoModelForVisualQuestionAnswering
UpperCAmelCase = ["image", "text"]
UpperCAmelCase = ["text"]
def __init__( self: List[str] , *UpperCamelCase: Dict , **UpperCamelCase: List[str] ):
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: "Image" , UpperCamelCase: str ):
"""simple docstring"""
return self.pre_processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
def UpperCamelCase ( self: str , UpperCamelCase: str ):
"""simple docstring"""
with torch.no_grad():
return self.model(**UpperCamelCase ).logits
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
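# What the three hooks above do end to end, as a hedged sketch against the
# underlying ViLT checkpoint named in the class attribute (the image path is
# illustrative).
import torch
from PIL import Image
from transformers import AutoModelForVisualQuestionAnswering, AutoProcessor

processor = AutoProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = AutoModelForVisualQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
inputs = processor(Image.open("photo.jpg"), "How many cats are there?", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])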
| 335 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
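# One-line illustration of the factory under test: `get_activation` maps a
# string name to the matching torch module (sketch; names as used above).
import torch
from diffusers.models.activations import get_activation

print(get_activation("gelu")(torch.linspace(-2.0, 2.0, 5)))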
| 335 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ):
"""simple docstring"""
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ):
"""simple docstring"""
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["""shortest_edge"""] * h / w )
A__ = self.size["""shortest_edge"""]
elif w > h:
A__ = self.size["""shortest_edge"""]
A__ = int(self.size["""shortest_edge"""] * w / h )
else:
A__ = self.size["""shortest_edge"""]
A__ = self.size["""shortest_edge"""]
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            A__ = max(UpperCamelCase , key=lambda item : item[0] )[0]
            A__ = max(UpperCamelCase , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
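# The aspect-ratio rule that `get_expected_values` encodes above, written out
# as a hedged standalone sketch (the longest_edge cap of the real processor is
# ignored here for brevity): the short side is scaled to `shortest_edge`.
def shortest_edge_resize(width: int, height: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if width > height:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge

print(shortest_edge_resize(30, 400))  # (18, 240): a 30x400 image keeps its aspect ratio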
| 335 | 1 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
SCREAMING_SNAKE_CASE_ : int = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend( UpperCAmelCase_ : Union[str, Any] ):
A__ = _re_backend.findall(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) == 0:
return None
return "_and_".join(UpperCAmelCase_ )
def read_init():
with open(os.path.join(UpperCAmelCase_ , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A__ = f.readlines()
# Get to the point we do the actual imports for type checking
A__ = 0
A__ = {}
# Go through the end of the file
while line_index < len(UpperCAmelCase_ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
A__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while line_index < len(UpperCAmelCase_ ) and len(lines[line_index] ) > 1:
A__ = lines[line_index]
A__ = _re_single_line_import.search(UpperCAmelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(UpperCAmelCase_ ) > 0:
A__ = objects
else:
line_index += 1
return backend_specific_objects
def create_dummy_object( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] ):
if name.isupper():
return DUMMY_CONSTANT.format(UpperCAmelCase_ )
elif name.islower():
return DUMMY_FUNCTION.format(UpperCAmelCase_ , UpperCAmelCase_ )
else:
return DUMMY_CLASS.format(UpperCAmelCase_ , UpperCAmelCase_ )
def create_dummy_files( UpperCAmelCase_ : Optional[Any]=None ):
if backend_specific_objects is None:
A__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
A__ = {}
for backend, objects in backend_specific_objects.items():
A__ = """[""" + """, """.join(F"""\"{b}\"""" for b in backend.split("""_and_""" ) ) + """]"""
A__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(UpperCAmelCase_ , UpperCAmelCase_ ) for o in objects] )
A__ = dummy_file
return dummy_files
def check_dummies( UpperCAmelCase_ : List[Any]=False ):
A__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
A__ = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
A__ = os.path.join(UpperCAmelCase_ , """utils""" )
A__ = {
backend: os.path.join(UpperCAmelCase_ , F"""dummy_{short_names.get(UpperCAmelCase_ , UpperCAmelCase_ )}_objects.py""" )
for backend in dummy_files.keys()
}
A__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(UpperCAmelCase_ ):
with open(UpperCAmelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A__ = f.read()
else:
A__ = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"""Updating diffusers.utils.dummy_{short_names.get(UpperCAmelCase_ , UpperCAmelCase_ )}_objects.py as the main """
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F"""diffusers.utils.dummy_{short_names.get(UpperCAmelCase_ , UpperCAmelCase_ )}_objects.py. Run `make fix-copies` """
"""to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
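# A quick hedged illustration of the template logic: rendering DUMMY_CLASS for
# a hypothetical torch-backed class shows the placeholder that
# `create_dummy_files` writes into dummy_pt_objects.py.
print(DUMMY_CLASS.format("UNet2DModel", '["torch"]'))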
| 335 |
"""simple docstring"""
def compute_ap( l ): # noqa: E741
    A__ = len(l )
A__ = 0
A__ = [0] * n
A__ = [False] * n
A__ = [False] * n
    def dfs(root , at , parent , out_edge_count ):
if parent == root:
out_edge_count += 1
A__ = True
A__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
                A__ = dfs(root , to , at , out_edge_count )
A__ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
A__ = True
# AP found via cycle
if at == low[to]:
A__ = True
else:
A__ = min(low[at] , UpperCAmelCase_ )
return out_edge_count
for i in range(UpperCAmelCase_ ):
if not visited[i]:
A__ = 0
            A__ = dfs(i , i , -1 , out_edge_count )
A__ = out_edge_count > 1
for x in range(len(UpperCAmelCase_ ) ):
if is_art[x] is True:
print(UpperCAmelCase_ )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
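# Cross-check of the DFS low-link logic above using networkx (hedged; assumes
# networkx is installed). For the sample graph the articulation points are
# expected to be 2, 3 and 5.
import networkx as nx

graph = nx.Graph()
for node, neighbours in data.items():
    graph.add_edges_from((node, n) for n in neighbours)
print(sorted(nx.articulation_points(graph)))  # [2, 3, 5]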
| 335 | 1 |
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    X: List of datapoints to be compared with the `reference_distribution`.\n    reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n    mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n    >>> print(results)\n    {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: int ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def UpperCamelCase ( self: Dict , UpperCamelCase: Any , UpperCamelCase: Any ):
"""simple docstring"""
A__ = np.array(UpperCamelCase )
A__ = np.array(UpperCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
A__ = X - np.mean(UpperCamelCase )
A__ = np.cov(reference_distribution.T )
try:
A__ = np.linalg.inv(UpperCamelCase )
except np.linalg.LinAlgError:
A__ = np.linalg.pinv(UpperCamelCase )
A__ = np.dot(UpperCamelCase , UpperCamelCase )
A__ = np.dot(UpperCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 335 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 335 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _snake_case ( UpperCAmelCase_ : Dict ):
    A__ = filter(lambda p : p.requires_grad , model.parameters() )
A__ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
SCREAMING_SNAKE_CASE_ : Optional[int] = logging.getLogger(__name__)
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] ):
if metric == "rouge2":
A__ = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
A__ = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
A__ = """{val_avg_em:.4f}-{step_count}"""
elif metric == "loss":
A__ = """{val_avg_loss:.4f}-{step_count}"""
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
""" function.""" )
A__ = ModelCheckpoint(
dirpath=UpperCAmelCase_ , filename=UpperCAmelCase_ , monitor=F"""val_{metric}""" , mode="""max""" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def _snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
return EarlyStopping(
monitor=F"""val_{metric}""" , mode="""min""" if """loss""" in metric else """max""" , patience=UpperCAmelCase_ , verbose=UpperCAmelCase_ , )
class a ( pl.Callback ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
A__ = {f"""lr_group_{i}""": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCamelCase )
@rank_zero_only
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: pl.Trainer , UpperCamelCase: pl.LightningModule , UpperCamelCase: str , UpperCamelCase: Tuple=True ):
"""simple docstring"""
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
A__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
A__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
A__ = od / """test_results.txt"""
A__ = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A__ = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
A__ = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=UpperCamelCase )
generations_file.parent.mkdir(exist_ok=UpperCamelCase )
with open(UpperCamelCase , """a+""" ) as writer:
for key in sorted(UpperCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
A__ = metrics[key]
if isinstance(UpperCamelCase , torch.Tensor ):
A__ = val.item()
A__ = f"""{key}: {val:.6f}\n"""
writer.write(UpperCamelCase )
if not save_generations:
return
if "preds" in metrics:
A__ = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(UpperCamelCase )
@rank_zero_only
def UpperCamelCase ( self: str , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
try:
A__ = pl_module.model.model.num_parameters()
except AttributeError:
A__ = pl_module.model.num_parameters()
A__ = count_trainable_parameters(UpperCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase ( self: int , UpperCamelCase: pl.Trainer , UpperCamelCase: pl.LightningModule ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(UpperCamelCase , UpperCamelCase , """test""" )
@rank_zero_only
def UpperCamelCase ( self: Dict , UpperCamelCase: pl.Trainer , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
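# How the two factory helpers above are typically wired into a Trainer
# (hedged sketch; the metric name and paths are illustrative, and the
# callbacks are built directly here rather than through the mangled helpers).
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

trainer = pl.Trainer(
    max_epochs=10,
    callbacks=[
        ModelCheckpoint(
            dirpath="outputs",
            filename="{val_avg_rouge2:.4f}-{step_count}",
            monitor="val_rouge2",
            mode="max",
            save_top_k=1,
        ),
        EarlyStopping(monitor="val_rouge2", mode="max", patience=3),
    ],
)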
| 335 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", _lowerCamelCase, )
class DeeRobertaModel( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Union[str, Any] , UpperCamelCase: Any ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = RobertaEmbeddings(UpperCamelCase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = config.num_hidden_layers
A__ = DeeRobertaModel(UpperCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: str=None , UpperCamelCase: str=None , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , UpperCamelCase: List[Any]=None , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[int]=-1 , UpperCamelCase: Optional[Any]=False , ):
"""simple docstring"""
A__ = self.num_layers
try:
A__ = self.roberta(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , position_ids=UpperCamelCase , head_mask=UpperCamelCase , inputs_embeds=UpperCamelCase , )
A__ = outputs[1]
A__ = self.dropout(UpperCamelCase )
A__ = self.classifier(UpperCamelCase )
A__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A__ = e.message
A__ = e.exit_layer
A__ = outputs[0]
if not self.training:
A__ = entropy(UpperCamelCase )
A__ = []
A__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
A__ = []
for highway_exit in outputs[-1]:
A__ = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase )
if train_highway:
A__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A__ = (loss,) + outputs
if not self.training:
A__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
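# The `entropy` helper imported at the top of this file is, in the DeeBERT
# reference code, computed from raw logits roughly as below (hedged sketch:
# this is H(softmax(x)) rearranged so the softmax is never materialised).
import torch

def entropy_sketch(x: torch.Tensor) -> torch.Tensor:
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=-1)      # softmax normaliser
    B = torch.sum(x * exp_x, dim=-1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A       # equals -sum(p * log p)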
| 335 | 1 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A__ = flax_key_tuple[:-1] + ("""weight""",)
A__ = torch.permute(UpperCAmelCase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCAmelCase_ ):
# linear layer
A__ = flax_key_tuple[:-1] + ("""weight""",)
A__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A__ = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict( UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] ):
if "metadata" in layer:
A__ = layer.split("""metadata""" )
A__ = """""".join(split_layer[0] )[:-1]
A__ = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
A__ = layer.split("""kvstore""" )
A__ = """""".join(split_layer[0] )[:-1]
A__ = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
A__ = layer.split("""/""" )
A__ = """/""".join(split_layer[:-1] )
A__ = (split_layer[-1],)
if "kvstore/path" in layer:
A__ = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
A__ = """file"""
else:
A__ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def rename_and_save_block( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ):
A__ = rename_keys(UpperCAmelCase_ )
A__ = {}
for k, v in current_block.items():
A__ = v
A__ = new_current_block
torch.save(UpperCAmelCase_ , UpperCAmelCase_ )
def shard_on_the_fly( UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str = WEIGHTS_NAME ):
A__ = convert_file_size_to_int(UpperCAmelCase_ )
A__ = []
A__ = {}
A__ = 0
A__ = 0
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
A__ = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
A__ = flatten_dict(UpperCAmelCase_ , sep="""/""" )
A__ = {}
for layer in checkpoint_info.keys():
A__ , A__ , A__ = get_key_and_tensorstore_dict(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if curr_real_layer_name in all_layers:
A__ = content
else:
A__ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A__ = torch.tensor(UpperCAmelCase_ )
A__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A__ , A__ = rename_base_flax_keys(tuple(key.split("""/""" ) ) , UpperCAmelCase_ )
A__ = """/""".join(UpperCAmelCase_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A__ = os.path.join(
UpperCAmelCase_ , weights_name.replace(""".bin""" , F"""-{len(UpperCAmelCase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(UpperCAmelCase_ , UpperCAmelCase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
A__ = {}
A__ = 0
A__ = raw_weights.to(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A__ = os.path.join(UpperCAmelCase_ , weights_name.replace(""".bin""" , F"""-{len(UpperCAmelCase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(UpperCAmelCase_ , UpperCAmelCase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(UpperCAmelCase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A__ = {}
A__ = {}
for idx, shard in enumerate(UpperCAmelCase_ ):
A__ = weights_name.replace(
""".bin""" , F"""-{idx+1:05d}-of-{len(UpperCAmelCase_ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
A__ = os.path.join(UpperCAmelCase_ , weights_name.replace(""".bin""" , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) )
A__ = shard
for key in shard:
A__ = shard_file
# Add the metadata
A__ = {"""total_size""": total_size}
A__ = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , """w""" , encoding="""utf-8""" ) as f:
A__ = json.dumps(UpperCAmelCase_ , indent=2 , sort_keys=UpperCAmelCase_ ) + """\n"""
f.write(UpperCAmelCase_ )
return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _snake_case ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A__ = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
A__ = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
A__ = TaTokenizer.from_pretrained("""t5-small""" )
A__ = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
A__ = tokenizer(UpperCAmelCase_ , return_tensors="""pt""" ).input_ids
A__ = model.generate(UpperCAmelCase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
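# A hedged sketch of consuming the index this script produces: the JSON maps
# every parameter name to the shard that stores it, so a full state dict can
# be rebuilt by loading each distinct shard once.
import json
import os

import torch

def load_sharded_state_dict(folder: str, index_name: str = "pytorch_model.bin.index.json") -> dict:
    with open(os.path.join(folder, index_name)) as f:
        index = json.load(f)
    state_dict = {}
    for shard_file in sorted(set(index["weight_map"].values())):
        state_dict.update(torch.load(os.path.join(folder, shard_file)))
    return state_dict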
| 335 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
SCREAMING_SNAKE_CASE_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _snake_case ( ):
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _snake_case ( ):
A__ = """rougeLsum"""
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _snake_case ( ):
A__ = ["""rouge1""", """rouge2""", """rougeL"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
assert score_sep == score_no_sep
def _snake_case ( ):
A__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ )
def _snake_case ( ):
A__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase_ )["""rougeLsum"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _snake_case ( ):
A__ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A__ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
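# Under the hood `calculate_rouge` wraps Google's rouge_score package; a
# hedged minimal equivalent with bootstrap aggregation looks like this
# (the two example sentences are illustrative):
from rouge_score import rouge_scorer, scoring

scorer = rouge_scorer.RougeScorer(["rouge2", "rougeL"], use_stemmer=True)
aggregator = scoring.BootstrapAggregator()
for pred, tgt in [("hello there general kenobi", "hello there")]:
    aggregator.add_scores(scorer.score(tgt, pred))
print({k: round(v.mid.fmeasure * 100, 4) for k, v in aggregator.aggregate().items()})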
| 335 | 1 |