| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
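# Note (illustrative, not part of the original module): with the pattern above,
# importing this package is cheap. `_LazyModule` replaces the module object in
# `sys.modules`, and the torch-backed classes listed in `_import_structure` are
# only imported on first attribute access, e.g. (top-level re-export assumed):
#
#   from transformers import TrajectoryTransformerConfig   # no torch import yet
#   model_cls = TrajectoryTransformerModel                  # triggers the real import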
| 364 |
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring of input_string (Manacher's algorithm, O(n))."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
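    # Quick check (illustrative, added by hand): the longest palindromic
    # substring of "abbbaba" is "abbba".
    assert palindromic_string("abbbaba") == "abbba"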
| 10 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
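# Usage sketch (illustrative, not part of the original file; the class name is
# the restoration used above, and the image path is an assumption):
#
#   from PIL import Image
#   processor = MobileNetV2ImageProcessor()
#   inputs = processor.preprocess(Image.open("example.png"), return_tensors="pt")
#   # inputs["pixel_values"] has shape (1, 3, 224, 224) after the
#   # shortest-edge-256 resize and the 224x224 center crop.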
| 365 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 10 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
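    # Worked example (illustrative): 4150 is counted by solution() because
    # 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
    assert digits_fifth_powers_sum(4150) == 4150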
| 366 |
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists. The source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, filling self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to target_vertex as a
        "->"-joined string, or raise ValueError if no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 10 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 367 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return the peak value of a list that first increases and then decreases."""
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
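    # Worked example (illustrative): for [1, 3, 4, 5, 2] the middle triple
    # (3, 4, 5) is increasing, so the search recurses right and returns 5.
    assert peak([1, 3, 4, 5, 2]) == 5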
| 10 | 0 |
import os
import pytest
from attr import dataclass
__lowerCamelCase = """us-east-1""" # defaults region
@dataclass
class UpperCAmelCase :
A__ : Union[str, Any] = 42
A__ : Optional[Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
A__ : Dict = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_00,
"save_steps": 55_00,
}
A__ : Optional[Any] = {**hyperparameters, "max_steps": 10_00}
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
return f"""{self.framework}-transfromers-test"""
@property
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
return f"""./tests/sagemaker/scripts/{self.framework}"""
@property
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def UpperCamelCase ( __lowerCamelCase : Optional[Any] ):
snake_case : Union[str, Any] = SageMakerTestEnvironment(framework=request.cls.framework )
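# Usage sketch (illustrative, not part of the original conftest): a test class
# can opt into the fixture and read the prepared environment, e.g.
#
#   @pytest.mark.usefixtures("sm_env")
#   class SingleNodeTest(unittest.TestCase):
#       framework = "pytorch"
#
#       def test_job_name(self):
#           assert self.env.base_job_name == "pytorch-transformers-test"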
| 368 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = """."""
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
__lowerCamelCase = line.strip()
__lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = """\n""".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 10 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
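# Worked example (illustrative): with the default pad_size of 8, a 500x642
# image gains (500 // 8 + 1) * 8 - 500 = 4 rows and (642 // 8 + 1) * 8 - 642 = 6
# columns, giving a 504x648 output whose sides are exact multiples of 8.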
| 369 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
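# Example invocation via fire (illustrative; the script and file names are
# assumptions):
#
#   python rouge_cli.py predictions.txt references.txt --save_path=metrics.json
#
# fire maps the positional arguments onto pred_path/tgt_path, and keyword
# flags onto save_path plus any extra calculate_rouge kwargs.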
| 10 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tokenizer.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
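# Example invocation via fire (illustrative; the tokenizer name and data path
# are assumptions):
#
#   python save_len_file.py t5-small /path/to/data_dir
#
# This pickles per-split lists of max token lengths to each dataset's
# `len_file`, so later runs can bucket batches by length without re-tokenizing.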
| 370 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
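# Example invocation (illustrative; the script name and all paths are
# assumptions):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --config_path ./config.json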
| 10 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
__lowerCamelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCamelCase = logging.WARNING
def UpperCamelCase ( ):
snake_case : Any = os.getenv("DATASETS_VERBOSITY" , __a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCamelCase ( ):
return __name__.split("." )[0]
def UpperCamelCase ( ):
return logging.getLogger(_get_library_name() )
def UpperCamelCase ( ):
# Apply our default configuration to the library root logger.
snake_case : str = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCamelCase ( ):
snake_case : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCamelCase ( __lowerCamelCase : Optional[Any] = None ):
if name is None:
snake_case : List[Any] = _get_library_name()
return logging.getLogger(__a )
def UpperCamelCase ( ):
return _get_library_root_logger().getEffectiveLevel()
def UpperCamelCase ( __lowerCamelCase : Optional[int] ):
_get_library_root_logger().setLevel(__a )
def UpperCamelCase ( ):
    return set_verbosity(INFO )
def UpperCamelCase ( ):
    return set_verbosity(WARNING )
def UpperCamelCase ( ):
    return set_verbosity(DEBUG )
def UpperCamelCase ( ):
    return set_verbosity(ERROR )
def UpperCamelCase ( ):
    _get_library_root_logger().propagate = False
def UpperCamelCase ( ):
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class UpperCAmelCase :
def __init__(self : int , *snake_case__ : str , **snake_case__ : Dict ) -> Optional[int]: # pylint: disable=unused-argument
'''simple docstring'''
snake_case : Optional[int] = args[0] if args else None
def __iter__(self : List[str] ) -> Any:
'''simple docstring'''
return iter(self._iterator )
def __getattr__(self : int , snake_case__ : int ) -> Tuple:
'''simple docstring'''
def empty_fn(*snake_case__ : int , **snake_case__ : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self
def __exit__(self : Optional[int] , snake_case__ : int , snake_case__ : Any , snake_case__ : int ) -> Dict:
'''simple docstring'''
return
__lowerCamelCase = True
class UpperCAmelCase :
def __call__(self : Optional[Any] , *snake_case__ : Optional[Any] , snake_case__ : Tuple=False , **snake_case__ : Optional[Any] ) -> Dict:
'''simple docstring'''
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
def _SCREAMING_SNAKE_CASE (self : Any , *snake_case__ : Optional[int] , **snake_case__ : str ) -> int:
'''simple docstring'''
snake_case : str = None
if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCamelCase = _tqdm_cls()
def UpperCamelCase ( ):
global _tqdm_active
return bool(_tqdm_active )
def UpperCamelCase ( ):
global _tqdm_active
    _tqdm_active = True
def UpperCamelCase ( ):
global _tqdm_active
    _tqdm_active = False
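# NOTE: a minimal usage sketch, not part of the original module. It assumes the
# de-obfuscated names of the upstream `datasets` logging helpers (set_verbosity_error,
# get_logger, disable_progress_bar); in this file the same helpers are all renamed.
import datasets

datasets.logging.set_verbosity_error()                   # only ERROR/CRITICAL are emitted
sketch_logger = datasets.logging.get_logger(__name__)
sketch_logger.error("still visible at ERROR verbosity")
datasets.disable_progress_bar()                          # flips the module-level _tqdm_active flag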
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""}
__lowerCamelCase = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Dict = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
snake_case : Any = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
snake_case : List[Any] = token.rstrip("\n" )
snake_case : int = index
return vocab
class UpperCAmelCase ( A_ ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = ["input_ids", "attention_mask"]
def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None:
'''simple docstring'''
snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case : Dict = f"""[unused{i}]"""
            self.fairseq_tokens_to_ids[tok] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case : Dict = 12
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__(self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.__dict__.copy()
snake_case : Tuple = None
return state
def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]:
'''simple docstring'''
        snake_case : Dict = "".join(snake_case__ ).replace(SPIECE_UNDERLINE , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
snake_case : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
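# NOTE: a standalone sketch, not part of the original class, of the fairseq/sentencepiece
# offset logic above: ids below the offset come from the hand-built special-token table,
# everything else is the sentencepiece id shifted by the offset; spm id 0 maps to [UNK].
def sketch_token_to_id(token, special_table, spm_piece_to_id, offset=12, unk_id=3):
    if token in special_table:
        return special_table[token]
    spm_id = spm_piece_to_id(token)
    return spm_id + offset if spm_id else unk_id

sketch_specials = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
assert sketch_token_to_id("[CLS]", sketch_specials, lambda t: 0) == 1
assert sketch_token_to_id("▁de", sketch_specials, lambda t: 7) == 19   # 7 + offset 12
assert sketch_token_to_id("oov", sketch_specials, lambda t: 0) == 3    # unknown piece -> [UNK]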
| 10 | 0 |
def fibonacci ( n : int ):
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index ( n : int ):
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution ( n : int = 1000 ):
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
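    # NOTE: a quick worked check, not part of the original solution: 144 is the first
    # three-digit Fibonacci number and sits at index 12 in the 1-indexed convention above.
    assert fibonacci(12) == 144
    assert solution(3) == 12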
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCamelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
__lowerCamelCase = {
"""facebook/xglm-564M""": 20_48,
}
class UpperCAmelCase ( A_ ):
A__ : Any = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : str , snake_case__ : Optional[Any] , snake_case__ : List[str]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Dict="</s>" , snake_case__ : Any="<s>" , snake_case__ : str="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Any , ) -> None:
'''simple docstring'''
snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case : Optional[int] = 7
snake_case : List[str] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case : Tuple = len(self.sp_model )
snake_case : Any = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case__ )
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = self.__dict__.copy()
snake_case : str = None
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Dict , snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case : Tuple = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ ))
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : List[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
        snake_case : List[Any] = "".join(snake_case__ ).replace(SPIECE_UNDERLINE , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
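# NOTE: a standalone sketch, not part of the original class, of the one-position shift
# documented in the alignment table above: fairseq id = spm id + 1, with ids 0-3
# reserved for <s>/<pad>/</s>/<unk>.
sketch_fairseq_specials = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
sketch_spm_ids = {",": 3, ".": 4, "▁": 5}   # toy sentencepiece ids from the table above

def sketch_xglm_id(token):
    if token in sketch_fairseq_specials:
        return sketch_fairseq_specials[token]
    return sketch_spm_ids[token] + 1        # fairseq_offset == 1

assert sketch_xglm_id(",") == 4             # "," is spm id 3, fairseq id 4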
| 10 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCAmelCase :
def __init__(self : Optional[Any] , snake_case__ : str , snake_case__ : List[str]=13 , snake_case__ : Union[str, Any]=7 , snake_case__ : str=True , snake_case__ : Union[str, Any]=True , snake_case__ : Dict=True , snake_case__ : Dict=True , snake_case__ : Optional[Any]=99 , snake_case__ : Optional[Any]=64 , snake_case__ : Union[str, Any]=32 , snake_case__ : Any=5 , snake_case__ : Dict=4 , snake_case__ : List[str]=37 , snake_case__ : str="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : List[str]=5_12 , snake_case__ : Tuple=16 , snake_case__ : Union[str, Any]=2 , snake_case__ : Dict=0.02 , snake_case__ : Any=3 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[Any]=None , ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = parent
snake_case : Dict = batch_size
snake_case : Union[str, Any] = seq_length
snake_case : str = is_training
snake_case : Dict = use_input_mask
snake_case : Tuple = use_token_type_ids
snake_case : str = use_labels
snake_case : int = vocab_size
snake_case : List[str] = hidden_size
snake_case : Dict = embedding_size
snake_case : Dict = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : int = intermediate_size
snake_case : Union[str, Any] = hidden_act
snake_case : Optional[Any] = hidden_dropout_prob
snake_case : str = attention_probs_dropout_prob
snake_case : str = max_position_embeddings
snake_case : List[str] = type_vocab_size
snake_case : Tuple = type_sequence_label_size
snake_case : Tuple = initializer_range
snake_case : List[str] = num_labels
snake_case : Dict = num_choices
snake_case : List[str] = scope
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Optional[Any] = None
if self.use_input_mask:
snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : List[Any] = None
if self.use_token_type_ids:
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : str = None
snake_case : Tuple = None
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Tuple ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = MegatronBertModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
snake_case : Dict = model(snake_case_ , token_type_ids=snake_case_ )
snake_case : Tuple = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : str ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = MegatronBertForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : Any = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = MegatronBertForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : int = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = MegatronBertForNextSentencePrediction(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : Tuple = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Union[str, Any] = MegatronBertForPreTraining(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : str = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , next_sentence_label=snake_case_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : List[str] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = MegatronBertForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : Optional[int] = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Tuple ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = self.num_labels
snake_case : Optional[int] = MegatronBertForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = self.num_labels
snake_case : Dict = MegatronBertForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : List[str] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str ) -> Dict:
'''simple docstring'''
snake_case : str = self.num_choices
snake_case : Union[str, Any] = MegatronBertForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Any = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = self.prepare_config_and_inputs()
        snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case : str = config_and_inputs
snake_case : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __lowercase ,__lowercase ,unittest.TestCase ):
A__ : Any = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : Tuple = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : Optional[Any] = True
# test_resize_embeddings = False
A__ : List[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Dict=False ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[str] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class in get_values(snake_case_ ):
snake_case : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case_ )
snake_case : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = MegatronBertModelTester(self )
snake_case : Any = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*snake_case_ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*snake_case_ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*snake_case_ )
def _SCREAMING_SNAKE_CASE (self : str ) -> Dict:
'''simple docstring'''
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*snake_case_ )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Tuple:
'''simple docstring'''
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*snake_case_ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*snake_case_ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any:
'''simple docstring'''
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*snake_case_ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*snake_case_ )
def UpperCamelCase ( __lowerCamelCase : str ):
return torch.tensor(
        lowerCAmelCase__ , dtype=torch.long , device=torch_device , )
__lowerCamelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
snake_case : Tuple = os.path.join(os.environ["MYDIR"] , snake_case_ )
snake_case : Any = MegatronBertModel.from_pretrained(snake_case_ )
model.to(snake_case_ )
model.half()
snake_case : Union[str, Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
snake_case : List[str] = model(snake_case_ )[0]
snake_case : Optional[int] = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , snake_case_ )
snake_case : Tuple = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
snake_case : Optional[int] = output[0, ii, jj]
snake_case : Any = expected[3 * ii + jj]
snake_case : Union[str, Any] = '''ii={} jj={} a={} b={}'''.format(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
self.assertTrue(math.isclose(snake_case_ , snake_case_ , rel_tol=snake_case_ , abs_tol=snake_case_ ) , msg=snake_case_ )
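# NOTE: a minimal standalone sketch, not part of the test suite, of the tolerance check
# used in the integration test above: compare logits slices element-wise with math.isclose.
import math

def sketch_slices_close(actual, expected, tol=1e-4):
    return all(math.isclose(a, b, rel_tol=tol, abs_tol=tol) for a, b in zip(actual, expected))

assert sketch_slices_close([-0.6040, -0.2517], [-0.60401, -0.25171])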
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
A__ : int = ["pixel_values"]
def __init__(self : Tuple , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : int = 8 , **snake_case__ : Dict , ) -> None:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : int = do_rescale
snake_case : List[str] = rescale_factor
snake_case : Optional[Any] = do_pad
snake_case : Dict = pad_size
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : np.ndarray , snake_case__ : float , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[str] ) -> np.ndarray:
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : Optional[Union[str, ChannelDimension]] = None ) -> Dict:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = get_image_size(snake_case__ )
snake_case : str = (old_height // size + 1) * size - old_height
snake_case : List[str] = (old_width // size + 1) * size - old_width
return pad(snake_case__ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case__ : List[Any] , ) -> Tuple:
'''simple docstring'''
snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Optional[Any] = do_pad if do_pad is not None else self.do_pad
snake_case : Dict = pad_size if pad_size is not None else self.pad_size
snake_case : Union[str, Any] = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
snake_case : str = [to_numpy_array(snake_case__ ) for image in images]
if do_rescale:
snake_case : str = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_pad:
snake_case : List[Any] = [self.pad(snake_case__ , size=snake_case__ ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
snake_case : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
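# NOTE: a standalone numpy sketch, not part of the class above, of the same padding rule:
# grow each spatial dimension to the next multiple of `size` with symmetric padding.
import numpy as np

def sketch_pad_to_multiple(image, size=8):
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return np.pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric")

print(sketch_pad_to_multiple(np.zeros((10, 13))).shape)   # -> (16, 16)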
| 10 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__lowerCamelCase = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
__lowerCamelCase = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
__lowerCamelCase = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class UpperCAmelCase ( A_ ):
A__ : Optional[int] = VOCAB_FILES_NAMES
A__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Any = ["input_ids", "attention_mask"]
A__ : List[int] = []
A__ : List[int] = []
def __init__(self : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Any="<s>" , snake_case__ : Optional[int]="</s>" , snake_case__ : List[str]="</s>" , snake_case__ : str="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="m2m100" , snake_case__ : Optional[Dict[str, Any]] = None , snake_case__ : List[Any]=8 , **snake_case__ : Dict , ) -> None:
'''simple docstring'''
snake_case : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case : List[str] = language_codes
snake_case : Optional[int] = FAIRSEQ_LANGUAGE_CODES[language_codes]
snake_case : List[str] = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
snake_case : List[str] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case__ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case__ , tgt_lang=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , language_codes=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case__ , **snake_case__ , )
snake_case : Optional[Any] = vocab_file
snake_case : List[Any] = load_json(snake_case__ )
snake_case : Tuple = {v: k for k, v in self.encoder.items()}
snake_case : List[str] = spm_file
snake_case : int = load_spm(snake_case__ , self.sp_model_kwargs )
snake_case : List[Any] = len(self.encoder )
snake_case : Dict = {
self.get_lang_token(snake_case__ ): self.encoder_size + i for i, lang_code in enumerate(snake_case__ )
}
snake_case : Any = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case__ )}
snake_case : List[str] = {v: k for k, v in self.lang_token_to_id.items()}
snake_case : List[str] = src_lang if src_lang is not None else "en"
snake_case : Optional[int] = tgt_lang
snake_case : Optional[Any] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
snake_case : List[Any] = num_madeup_words
@property
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : int ) -> Dict:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case__ , self.encoder[self.unk_token] )
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : int ) -> str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case__ , self.unk_token )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = []
snake_case : Optional[Any] = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case__ ) + token
snake_case : str = []
else:
current_sub_tokens.append(snake_case__ )
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
snake_case : Dict = [1] * len(self.prefix_tokens )
snake_case : Union[str, Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : int = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : Any ) -> Dict:
'''simple docstring'''
snake_case : List[str] = self.__dict__.copy()
snake_case : Optional[int] = None
return state
def __setstate__(self : Tuple , snake_case__ : Dict ) -> None:
'''simple docstring'''
snake_case : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : Optional[int] = load_spm(self.spm_file , self.sp_model_kwargs )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
snake_case : Dict = Path(snake_case__ )
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""" )
snake_case : Dict = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
snake_case : Union[str, Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , snake_case__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case__ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (str(snake_case__ ), str(snake_case__ ))
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[str] , snake_case__ : str = "en" , snake_case__ : Optional[List[str]] = None , snake_case__ : str = "ro" , **snake_case__ : Dict , ) -> BatchEncoding:
'''simple docstring'''
snake_case : List[str] = src_lang
snake_case : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[str] , snake_case__ : Optional[str] , **snake_case__ : Union[str, Any] ) -> int:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
snake_case : Union[str, Any] = src_lang
snake_case : Optional[Any] = self(snake_case__ , add_special_tokens=snake_case__ , **snake_case__ )
snake_case : Optional[int] = self.get_lang_id(snake_case__ )
snake_case : Union[str, Any] = tgt_lang_id
return inputs
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Dict:
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def _SCREAMING_SNAKE_CASE (self : str ) -> Dict:
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : List[str] = self.get_lang_token(snake_case__ )
snake_case : Tuple = self.lang_token_to_id[lang_token]
snake_case : Union[str, Any] = [self.cur_lang_id]
snake_case : int = [self.eos_token_id]
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : Any = self.get_lang_token(snake_case__ )
snake_case : Any = self.lang_token_to_id[lang_token]
snake_case : List[str] = [self.cur_lang_id]
snake_case : Optional[int] = [self.eos_token_id]
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : str ) -> str:
'''simple docstring'''
return self.lang_code_to_token[lang]
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
snake_case : Any = self.get_lang_token(snake_case__ )
return self.lang_token_to_id[lang_token]
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Dict[str, Any] ):
snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__lowerCamelCase )
spm.Load(str(__lowerCamelCase ) )
return spm
def UpperCamelCase ( __lowerCamelCase : str ):
with open(__lowerCamelCase , "r" ) as f:
return json.load(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
with open(__lowerCamelCase , "w" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase , indent=2 )
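# NOTE: a standalone sketch, not part of the class above, of the source-language special
# tokens it configures: every encoded sequence becomes "__lang__ <tokens...> </s>". The
# ids below are illustrative toys, not the real M2M100 vocabulary.
def sketch_build_m2m_input(token_ids, lang_code, lang_token_to_id, eos_id=2):
    prefix = [lang_token_to_id[f"__{lang_code}__"]]
    return prefix + list(token_ids) + [eos_id]

sketch_lang_ids = {"__en__": 128000, "__fr__": 128001}   # toy ids
print(sketch_build_m2m_input([5, 6, 7], "en", sketch_lang_ids))   # -> [128000, 5, 6, 7, 2]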
| 352 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel ( ksize : int , sigma : int , theta : int , lambd : int , gamma : int , psi : int ):
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
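    # NOTE: a standalone sanity check, not part of the original demo. At the kernel
    # center px = py = 0, so the response reduces to exp(0) * cos(psi) = cos(psi).
    check_kernel = gabor_filter_kernel(9, 8, 0, 10, 0, 0)
    assert np.isclose(check_kernel[4, 4], np.cos(0.0))   # center tap of a 9x9 kernel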
| 10 | 0 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id ( spanish_id : str ):
    if not isinstance(spanish_id , str ):
        msg = f"""Expected string as input, found {type(spanish_id ).__name__}"""
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace("-" , "" ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
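    # NOTE: worked examples, not in the original module: 12345678 % 23 == 14 and
    # LOOKUP_LETTERS[14] == "Z", so "12345678Z" is a well-formed NIF.
    assert is_spain_national_id("12345678Z")
    assert is_spain_national_id("12345678-Z")   # hyphens are stripped before validation
    assert not is_spain_national_id("12345678A")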
| 353 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCAmelCase :
def __init__(self : Dict , snake_case__ : Any , snake_case__ : Tuple=99 , snake_case__ : Tuple=13 , snake_case__ : int=16 , snake_case__ : Tuple=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=2 , snake_case__ : List[Any]=32 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : int=30 , snake_case__ : int=0 , snake_case__ : Tuple=1 , snake_case__ : Optional[Any]=2 , snake_case__ : int=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : Any = decoder_seq_length
# For common tests
snake_case : Any = self.decoder_seq_length
snake_case : Optional[int] = is_training
snake_case : List[str] = use_attention_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : Any = d_model
snake_case : Dict = d_model
snake_case : List[str] = decoder_layers
snake_case : Union[str, Any] = decoder_layers
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_attention_heads
snake_case : Dict = decoder_attention_heads
snake_case : Optional[int] = eos_token_id
snake_case : Dict = bos_token_id
snake_case : List[str] = pad_token_id
snake_case : int = decoder_start_token_id
snake_case : List[Any] = use_cache
snake_case : List[str] = max_position_embeddings
snake_case : Dict = None
snake_case : Union[str, Any] = decoder_seq_length
snake_case : Union[str, Any] = 2
snake_case : Union[str, Any] = 1
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_attention_mask:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : List[Any] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval()
snake_case : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case : List[str] = model(snake_case__ , use_cache=snake_case__ )
snake_case : Any = model(snake_case__ )
snake_case : Any = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
snake_case : List[Any] = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
snake_case : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : str = model(snake_case__ )["last_hidden_state"]
snake_case : str = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"]
# select random slice
snake_case : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : str = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ )
snake_case : int = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
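# NOTE: a minimal standalone sketch, not part of the test class, of the cached-decoding
# invariant checked above: decoding the full sequence must match decoding only the new
# step on top of the cache. A toy "decoder" (cumulative sum) stands in for TrOCRDecoder.
import torch

def sketch_decode(x, past=None):
    prefix = past if past is not None else torch.zeros(x.shape[0], 1)
    states = prefix + torch.cumsum(x, dim=1)
    return states, states[:, -1:]   # (all hidden states, cache = last state)

sketch_x = torch.randn(2, 5)
full_states, _ = sketch_decode(sketch_x)
_, sketch_cache = sketch_decode(sketch_x[:, :4])
step_states, _ = sketch_decode(sketch_x[:, 4:], past=sketch_cache)
assert torch.allclose(full_states[:, -1:], step_states, atol=1e-6)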
| 10 | 0 |
import torch
from diffusers import StableDiffusionPipeline
model_id = """path-to-your-trained-model"""
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("""cuda""")
prompt = """A photo of sks dog in a bucket"""
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
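# NOTE: an optional follow-up sketch, not in the original snippet: passing a seeded
# torch.Generator makes the sampled image reproducible across runs.
generator = torch.Generator("""cuda""").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
image.save("""dog-bucket-seed0.png""")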
| 354 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
inputs.append(create_inputs(__lowerCamelCase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def UpperCamelCase ( __lowerCamelCase : List ):
snake_case : List[str] = []
for output in outputs:
if isinstance(__lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
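# A minimal sanity sketch (my own illustration, not part of the original test file):
# given raw tool outputs, `output_types` maps each one back to its declared modality,
# e.g. output_types([Image.new("RGB", (4, 4)), "a caption"]) -> ["image", "text"].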
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
| 10 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
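# Illustration only (not from the original file): `trim_batch` drops columns that
# contain nothing but padding. With pad_token_id=0:
#   input_ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#   trim_batch(input_ids, 0)  ->  tensor([[5, 6], [7, 0]])  (last two all-pad columns removed)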
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix=""):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self) -> int:
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
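# Usage sketch (illustrative paths only, not part of the original file):
#   ds = Seq2SeqDataset(tokenizer, data_dir="data/", max_source_length=128, max_target_length=32)
#   loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)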
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
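# Worked example for the SQuAD-style metrics above (illustration, not part of the original file):
#   normalize_answer("The Cat sat!")  -> "cat sat"   (lowercased, punctuation and articles removed)
#   f1_score("the cat sat", "a cat stood")
#       token overlap is {"cat"} -> precision = 1/2, recall = 1/2, F1 = 0.5
#   exact_match_score("The cat!", "cat") -> True, since both normalize to "cat"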
| 355 |
def hamming_distance(string_a: str, string_b: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
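# Usage sketch (illustration, not part of the original file):
#   hamming_distance("karolin", "kathrin")  -> 3   (positions 2, 3 and 4 differ)
#   hamming_distance("abc", "ab")           -> ValueError("String lengths must match!")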
| 10 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
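# Usage sketch (illustration, not part of the original file; checkpoint name assumed):
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # `inputs` then carries both `pixel_values` and the tokenized text fields.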
| 356 |
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit of `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 10 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 357 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
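# Note on the design: the CSS class string above is tied to Yahoo Finance's generated
# markup and can change without notice, so this scraper is inherently brittle; a
# dedicated market-data API would be the robust alternative.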
| 10 | 0 |
import re


def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
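# Usage sketch (illustration, not part of the original file):
#   dna("GCTA")  -> "CGAT"
#   dna("GCTX")  -> raises ValueError("Invalid Strand"), since "X" is not a valid base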
| 358 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
__lowerCamelCase = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 10 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
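# Worked example (illustration, not part of the original file): with the default
# scale factor of 8, a requested 768x768 canvas maps to (96, 96), since
# 768 // 8**2 == 12 and 12 * 8 == 96; non-multiples are rounded up one latent cell.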
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents((batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 359 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 10 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
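# Usage sketch (illustration, not part of the original file): the defaults describe
# the CvT-13 variant, three stages of depth 1/2/10 with widths 64/192/384.
#   config = CvtConfig()
#   # config.depth == [1, 2, 10]; config.embed_dim == [64, 192, 384]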
| 360 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
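# How the flag works (illustration, not part of the original file): arrow escape
# sequences end in codes that collide with plain letters (65 == ord("A")), so
# ARROW_KEY_FLAG = 256 is added to disambiguate: KEYMAP["up"] == 65 + 256 == 321.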
def get_raw_chars():
    """Read raw characters from stdin, handling the Windows and POSIX cases."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Get a character from stdin, translating escape sequences for arrow keys."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
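# Usage sketch (illustration, not part of the original file; the handler is hypothetical):
#   key = get_character()
#   if key == chr(KEYMAP["up"]):   # arrow keys come back with ARROW_KEY_FLAG applied
#       move_cursor_up()           # hypothetical menu-navigation handler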
| 10 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
def __init__(self : Dict , snake_case__ : List[str] , snake_case__ : str=13 , snake_case__ : Tuple=7 , snake_case__ : Optional[int]=True , snake_case__ : List[str]=True , snake_case__ : Optional[int]=True , snake_case__ : str=True , snake_case__ : int=99 , snake_case__ : Optional[int]=16 , snake_case__ : List[str]=36 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=6 , snake_case__ : int=6 , snake_case__ : List[str]=37 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : List[str]=5_12 , snake_case__ : Union[str, Any]=16 , snake_case__ : Optional[Any]=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[int]=3 , snake_case__ : List[str]=4 , snake_case__ : Dict=None , ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = parent
snake_case : List[str] = batch_size
snake_case : Any = seq_length
snake_case : Optional[int] = is_training
snake_case : Any = use_input_mask
snake_case : List[Any] = use_token_type_ids
snake_case : Optional[Any] = use_labels
snake_case : Optional[Any] = vocab_size
snake_case : Union[str, Any] = embedding_size
snake_case : Tuple = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : List[Any] = num_hidden_groups
snake_case : Dict = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : List[str] = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Dict = max_position_embeddings
snake_case : Union[str, Any] = type_vocab_size
snake_case : Dict = type_sequence_label_size
snake_case : Tuple = initializer_range
snake_case : str = num_labels
snake_case : str = num_choices
snake_case : List[str] = scope
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Tuple = None
if self.use_input_mask:
snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Any = None
if self.use_token_type_ids:
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : int = None
snake_case : List[Any] = None
snake_case : Optional[int] = None
if self.use_labels:
snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : int , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = AlbertModel(config=__a )
model.to(__a )
model.eval()
snake_case : Any = model(__a , attention_mask=__a , token_type_ids=__a )
snake_case : List[str] = model(__a , token_type_ids=__a )
snake_case : List[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Any , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[Any] = AlbertForPreTraining(config=__a )
model.to(__a )
model.eval()
snake_case : int = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , sentence_order_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : str = AlbertForMaskedLM(config=__a )
model.to(__a )
model.eval()
snake_case : Tuple = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ) -> Any:
'''simple docstring'''
snake_case : str = AlbertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
snake_case : str = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = self.num_labels
snake_case : Optional[Any] = AlbertForSequenceClassification(__a )
model.to(__a )
model.eval()
snake_case : Union[str, Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case : List[Any] = self.num_labels
snake_case : List[Any] = AlbertForTokenClassification(config=__a )
model.to(__a )
model.eval()
snake_case : Dict = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Any ) -> Any:
'''simple docstring'''
snake_case : Dict = self.num_choices
snake_case : Dict = AlbertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
snake_case : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : List[str] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : int = self.prepare_config_and_inputs()
(
(
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) ,
) : List[str] = config_and_inputs
snake_case : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str]=False ) -> Tuple:
'''simple docstring'''
snake_case : Any = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class in get_values(__a ):
snake_case : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a )
snake_case : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> int:
'''simple docstring'''
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__a )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any:
'''simple docstring'''
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Dict:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : List[Any] = type
self.model_tester.create_and_check_model(*__a )
@slow
def _SCREAMING_SNAKE_CASE (self : Dict ) -> int:
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : str = AlbertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case : str = AlbertModel.from_pretrained("albert-base-v2" )
snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
snake_case : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case : str = model(__a , attention_mask=__a )[0]
snake_case : List[str] = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __a )
snake_case : Any = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
| 361 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
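
# --- usage sketch (editor's addition, not part of the upstream module) ---
# What the `_LazyModule` wiring above buys you, assuming `transformers` is
# installed: importing the package stays cheap, and torch-backed submodules
# load only on first attribute access.
if __name__ == "__main__":
    import importlib

    dpt = importlib.import_module("transformers.models.dpt")
    config = dpt.DPTConfig()  # attribute access triggers the real submodule import
    print(config.model_type)  # "dpt"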
| 10 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node that holds an integer value."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums every node value of a binary tree via depth-first search.

    Iterating the wrapper yields the total once.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
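
# --- usage sketch (editor's addition) ---
# Builds the tree 10 -> (5, -3); iterating the wrapper yields the total once.
if __name__ == "__main__":
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(root))))  # 12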
| 362 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
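
# --- usage sketch (editor's addition, not part of the original module) ---
# The shim keeps old imports working while nudging users to the new name:
# instantiating it emits a FutureWarning but otherwise behaves exactly like
# PerceiverImageProcessor. Assumes `transformers` with vision extras installed.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        PerceiverFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)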
| 10 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
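
# --- usage sketch (editor's addition, not part of the original module) ---
# Shows the derived attributes: `hidden_size` doubles per stage starting from
# `embed_dim`, and `stage_names` feeds the backbone feature selection.
# Illustrative only; the relative imports mean this module runs inside the
# `transformers` package, e.g. via `from transformers import MaskFormerSwinConfig`.
def _demo_derived_attributes() -> None:
    config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    assert config.hidden_size == 96 * 2**3  # 768
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]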
| 364 |
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: finds the longest palindromic substring in linear time."""
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
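
# --- usage sketch (editor's addition) ---
# Cross-checks the O(n) Manacher answer against a naive quadratic scan.
def _naive_longest_palindrome(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            substring = s[i : j + 1]
            if substring == substring[::-1] and len(substring) > len(best):
                best = substring
    return best


if __name__ == "__main__":
    for word in ("aba", "abba", "forgeeksskeegfor"):
        assert palindromic_string(word) == _naive_longest_palindrome(word)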
| 10 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        input_features = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        input_features_truncated = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(input_features, input_features_truncated):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
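
# --- usage sketch (editor's addition, not part of the original test file) ---
# Runs the feature extractor outside the unittest harness; assumes the speech
# extras of `transformers` are installed. One second of silence at the default
# 16 kHz rate yields the padded 30 s log-mel grid checked by the tests above.
if __name__ == "__main__":
    if is_speech_available():
        extractor = WhisperFeatureExtractor()
        silence = [0.0] * 16_000
        feats = extractor(silence, sampling_rate=16_000, return_tensors="np").input_features
        print(feats.shape)  # (1, 80, 3000)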
| 365 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms, indexed by residue and atom type.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # use a mutable list of characters so unknown residues can be patched to "X"
            seq = list(g[1][0].strip())
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask, i.e. the atoms every residue type should have."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a `Protein` from model features and prediction outputs."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 10 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # returns 1 only if every character of the word is a CJK character
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)  # noqa: E741
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )

    args = parser.parse_args()
    main(args)
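
# --- usage sketch (editor's addition) ---
# `add_sub_symbol` re-marks word-piece tokens that continue an LTP-segmented
# Chinese word with the "##" prefix:
#
#   >>> add_sub_symbol(["中", "国", "人"], {"中国"})
#   ['中', '##国', '人']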
| 366 |
from __future__ import annotations
__lowerCamelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is a dictionary of adjacency lists; the source vertex is
        fixed at construction time."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Runs breadth-first search from the source vertex, filling `parent`."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Returns the shortest path from the source as an arrow-joined string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
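
# --- usage sketch (editor's addition) ---
# After `breath_first_search`, `parent` encodes the whole BFS tree, so a
# shortest path can also be recovered as a list by walking parents back to the
# source; for the graph above, path_as_list(g, "D") == ["G", "C", "A", "B", "D"].
def path_as_list(g: Graph, target: str) -> list[str]:
    path = [target]
    while (prev := g.parent.get(path[-1])) is not None:
        path.append(prev)
    return path[::-1]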
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 10 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
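
# --- usage sketch (editor's addition) ---
# Typical invocation, assuming the fine-tuned DialoGPT pickles (small_ft.pkl,
# medium_ft.pkl, large_ft.pkl) sit in the given directory; each becomes a
# ./DialoGPT-{size} checkpoint folder (script name hypothetical):
#
#   python convert_dialogpt_checkpoint.py --dialogpt_path /path/to/pickles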
| 367 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Returns the peak of a list that first increases and then decreases."""
    # middle index
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
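
# --- usage sketch (editor's addition) ---
# The divide-and-conquer search assumes the list increases to a single peak and
# then decreases; on such inputs it runs in O(log n).
if __name__ == "__main__":
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
    assert peak([1, 10, 9, 5]) == 10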
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = """."""
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
__lowerCamelCase = line.strip()
__lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = """\n""".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 10 | 0 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
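
# --- usage sketch (editor's addition) ---
# Typical invocation on a fairseq speech_to_text checkpoint (paths and script
# name hypothetical):
#
#   python convert_s2t_checkpoint.py \
#       --fairseq_path s2t_transformer_s.pt \
#       --pytorch_dump_folder_path ./s2t-small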
| 369 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
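
# --- usage sketch (editor's addition) ---
# `fire` exposes the function as a CLI: positional args map to pred_path and
# tgt_path, flags to keyword args (file names hypothetical):
#
#   python rouge_cli.py preds.txt refs.txt --save_path metrics.json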
| 10 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
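
# --- usage sketch (editor's addition, not part of the original module) ---
# Generating ONNX-export dummy inputs from the config above. Illustrative only:
# the relative imports mean this module runs inside the `transformers` package,
# and the tokenizer files are assumed to be downloadable.
def _demo_dummy_inputs():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
    return sorted(dummy.keys())  # input_ids, attention_mask, decoder_* ...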
| 370 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
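
# Illustrative sketch (not part of the original script): the core mechanism of
# set_recursively above is walking a dotted key through module attributes with
# getattr, then assigning the tensor to `.data`. A self-contained toy version,
# with a made-up two-layer module standing in for the real UniSpeechSat model:
def _demo_recursive_assignment():
    from torch import nn

    toy = nn.Sequential(nn.Linear(4, 4))
    pointer = toy
    for attribute in "0.weight".split("."):  # dotted key, like the values in MAPPING
        pointer = getattr(pointer, attribute)
    pointer.data = torch.zeros(4, 4)  # same assignment set_recursively performs
    assert bool((toy[0].weight == 0).all())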
| 10 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
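
# Illustrative note (not part of the original module): with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio evaluates to 320, i.e. the feature
# encoder emits one frame per 320 raw audio samples (20 ms at a 16 kHz sampling rate).
assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320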
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
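
# Illustrative sketch (not part of the original module): load_vocab maps each line
# of a plain-text vocab file to its line index. A quick round-trip with a temporary
# file shows the expected ordering:
def _demo_load_vocab():
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as f:
        f.write("[PAD]\n[CLS]\nhello\n")
        path = f.name
    vocab = load_vocab(path)
    assert vocab == {"[PAD]": 0, "[CLS]": 1, "hello": 2}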
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
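
# Illustrative sketch (not part of the original module): the id conversion above
# keeps a small table of special/[unused] tokens and shifts every SentencePiece id
# by a fixed fairseq offset. The same logic, stripped down to plain dicts with a
# stub standing in for sp_model.PieceToId (unknown pieces map to 0):
def _demo_fairseq_offset(token: str) -> int:
    fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
    fairseq_offset = 12
    unk_token_id = 3
    spm_ids = {",": 3, ".": 4}  # hypothetical SentencePiece ids for illustration
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_ids.get(token, 0)
    return spm_id + fairseq_offset if spm_id else unk_token_id


assert _demo_fairseq_offset("[CLS]") == 1
assert _demo_fairseq_offset(",") == 15  # 3 + offset 12, matching the alignment comment above
assert _demo_fairseq_offset("unseen") == 3  # falls back to [UNK]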
| 10 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
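
# Illustrative sketch (not part of the test file): the save/load pattern exercised
# in test_saved_model works for any tf.Module whose method carries an
# input_signature. A minimal standalone example (the Doubler module and its names
# are hypothetical); assumes TensorFlow is installed:
def _demo_saved_model_signature(tmp_path: str) -> None:
    class Doubler(tf.Module):
        @tf.function(input_signature=(tf.TensorSpec((None,), tf.float32, name="x"),))
        def serving(self, x):
            return {"output_0": 2.0 * x}

    module = Doubler()
    tf.saved_model.save(module, tmp_path, signatures={"serving_default": module.serving})
    loaded = tf.saved_model.load(tmp_path)
    out = loaded.signatures["serving_default"](x=tf.constant([1.0, 2.0]))["output_0"]
    assert bool(tf.reduce_all(out == tf.constant([2.0, 4.0])))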
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
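
# Illustrative sketch (not part of the original module): XGLM prefixes every
# sequence with </s> and joins pairs as "sep A sep sep B". With plain lists and a
# stand-in sep id, build_inputs_with_special_tokens reduces to:
def _demo_build_inputs(token_ids_0, token_ids_1=None, sep_token_id=2):
    if token_ids_1 is None:
        return [sep_token_id] + token_ids_0
    sep = [sep_token_id]
    return sep + token_ids_0 + sep + sep + token_ids_1


assert _demo_build_inputs([10, 11]) == [2, 10, 11]
assert _demo_build_inputs([10], [20]) == [2, 10, 2, 2, 20]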
| 10 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
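
# Illustrative usage note (not part of the original script): fire.Fire maps the
# function signature above onto CLI flags, so a hypothetical invocation is
#
#   python rouge_cli.py preds.txt targets.txt --save_path=metrics.json
#
# where each file holds one prediction/reference per line; any extra --flags are
# forwarded to calculate_rouge via **rouge_kwargs. Truncating the reference list to
# len(output_lns) simply tolerates a target file longer than the prediction file.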
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
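
# Illustrative sketch (not part of the original module): the pad method above grows
# each spatial dimension to the next multiple of `size` (always adding at least one
# full block) using symmetric padding. The same computation in plain numpy:
def _demo_pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return np.pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric")


assert _demo_pad_to_multiple(np.zeros((10, 13))).shape == (16, 16)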
| 10 | 0 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
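
# Illustrative sketch (not part of the original file): a quick property check of
# the hybrid introsort against Python's built-in sorted on random data.
def _demo_check_sort(trials: int = 100, n: int = 500) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-1000, 1000) for _ in range(n)]
        assert sort(list(data)) == sorted(data)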
| 352 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
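
# Illustrative sketch (not part of the original file): gabor_filter_kernel itself
# needs only numpy, so its orientation selectivity can be inspected without OpenCV
# by comparing kernels built at different theta angles.
def _demo_kernel_orientations() -> None:
    kernels = {theta: gabor_filter_kernel(10, 8, theta, 10, 0, 0) for theta in (0, 90)}
    for kernel in kernels.values():
        assert kernel.shape == (11, 11)  # an even ksize is bumped to the next odd value
    # a kernel and its 90-degree rotation are transposes of each other
    assert bool(np.allclose(kernels[0], kernels[90].T, atol=1e-6))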
| 10 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
        return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepares a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
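
# Illustrative sketch (not part of the test file): the tester above fabricates
# channels-first uint8 arrays and, for the PIL case, moves the channel axis last
# before wrapping. The same conversion in isolation:
def _demo_channels_first_to_pil():
    arr = np.random.randint(255, size=(3, 18, 20), dtype=np.uint8)  # (C, H, W)
    pil_image = Image.fromarray(np.moveaxis(arr, 0, -1))  # PIL wants (H, W, C)
    assert pil_image.size == (20, 18)  # PIL reports (width, height)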
| 353 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
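
# Illustrative sketch (not part of the test file): create_and_check_decoder_model_past
# verifies that decoding one new token with past_key_values matches the corresponding
# position of a full uncached forward pass. The comparison pattern in isolation, with
# hypothetical tensors standing in for the two model outputs:
def _demo_cache_equivalence_check():
    torch.manual_seed(0)
    output_from_no_past = torch.randn(2, 8, 16)     # (batch, full_seq, hidden)
    output_from_past = output_from_no_past[:, -1:]  # a cached step returns only the new position
    random_slice_idx = 3
    no_past_slice = output_from_no_past[:, -1, random_slice_idx]
    past_slice = output_from_past[:, 0, random_slice_idx]
    assert torch.allclose(past_slice, no_past_slice, atol=1e-3)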
| 10 | 0 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
@require_torch_gpu
def _SCREAMING_SNAKE_CASE (self : int ) -> List[Any]:
'''simple docstring'''
snake_case : Any = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
snake_case : List[str] = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
@require_torch
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case : Any = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(snake_case__ )
@slow
@require_tf
def _SCREAMING_SNAKE_CASE (self : str ) -> List[Any]:
'''simple docstring'''
snake_case : List[Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Tuple = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 6_10, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 15_73, "token_str": " Chris"},
] , )
snake_case : Any = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 22_01,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 1_27_90,
"token_str": " Lyon",
},
] , )
snake_case : List[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case__ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 34_99, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 1_36_06, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 29_41, "token_str": " Te"},
] , )
@require_torch
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Any:
'''simple docstring'''
snake_case : str = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
snake_case : str = None
snake_case : Optional[int] = None
self.run_pipeline_test(snake_case__ , [] )
@require_tf
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : List[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
snake_case : Tuple = None
snake_case : Tuple = None
self.run_pipeline_test(snake_case__ , [] )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Optional[Any] ) -> int:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token (probably reformer or wav2vec2)" )
snake_case : int = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : Dict = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] , snake_case__ : Dict ) -> str:
'''simple docstring'''
snake_case : List[str] = fill_masker.tokenizer
snake_case : Union[str, Any] = fill_masker.model
snake_case : List[str] = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : Dict = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : List[Any] = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
snake_case__ , [
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
] , )
with self.assertRaises(snake_case__ ):
fill_masker([None] )
        # Input that contains no mask_token is not supported
with self.assertRaises(snake_case__ ):
fill_masker("This is" )
self.run_test_top_k(snake_case__ , snake_case__ )
self.run_test_targets(snake_case__ , snake_case__ )
self.run_test_top_k_targets(snake_case__ , snake_case__ )
self.fill_mask_with_duplicate_targets_and_top_k(snake_case__ , snake_case__ )
self.fill_mask_with_multiple_masks(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : int ) -> Tuple:
'''simple docstring'''
snake_case : Dict = tokenizer.get_vocab()
snake_case : List[str] = sorted(vocab.keys() )[:2]
# Pipeline argument
snake_case : List[str] = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ , targets=snake_case__ )
snake_case : Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : Optional[int] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , snake_case__ )
snake_case : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(snake_case__ ) )
# Call argument
snake_case : List[Any] = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : List[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case__ )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , snake_case__ )
snake_case : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(snake_case__ ) )
# Score equivalence
snake_case : Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case__ )
snake_case : int = [top_mask["token_str"] for top_mask in outputs]
snake_case : List[str] = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case__ ) == set(snake_case__ ):
snake_case : Any = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case__ )
snake_case : Optional[int] = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(snake_case__ ) , nested_simplify(snake_case__ ) )
# Raises with invalid
with self.assertRaises(snake_case__ ):
snake_case : Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(snake_case__ ):
snake_case : Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""] )
with self.assertRaises(snake_case__ ):
snake_case : int = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="" )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ) -> int:
'''simple docstring'''
snake_case : Any = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ , top_k=2 )
snake_case : List[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : str = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : List[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
self.assertEqual(nested_simplify(snake_case__ ) , nested_simplify(snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Union[str, Any] , snake_case__ : int ) -> int:
'''simple docstring'''
snake_case : int = tokenizer.get_vocab()
snake_case : int = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
# top_k=2, ntargets=3
snake_case : str = sorted(vocab.keys() )[:3]
snake_case : int = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=snake_case__ )
        # If we use the most probable targets and filter differently, we should still
# have the same results
        snake_case : Any = [el["token_str"] for el in sorted(snake_case__ , key=lambda x : x["score"] , reverse=snake_case__ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case__ ).issubset(snake_case__ ):
snake_case : List[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=snake_case__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(snake_case__ ) , nested_simplify(snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : str = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : int = tokenizer.get_vocab()
# String duplicates + id duplicates
snake_case : Optional[int] = sorted(vocab.keys() )[:3]
snake_case : str = [targets[0], targets[1], targets[0], targets[2], targets[1]]
snake_case : List[Any] = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=snake_case__ , top_k=10 )
# The target list contains duplicates, so we can't output more
        # results than there are unique targets
self.assertEqual(len(snake_case__ ) , 3 )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple , snake_case__ : str ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : Dict = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case__ , [
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
] , )
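# For reference, a minimal direct invocation of the fill-mask pipeline that the
# tests above exercise (the checkpoint name is illustrative; scores depend on it):
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
for prediction in unmasker("The largest city in France is <mask>."):
    # each prediction is a dict with "sequence", "score", "token" and "token_str"
    print(prediction["token_str"], round(prediction["score"], 3))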
| 354 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
inputs.append(create_inputs(__lowerCamelCase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def UpperCamelCase ( __lowerCamelCase : List ):
snake_case : List[str] = []
for output in outputs:
if isinstance(__lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class UpperCAmelCase :
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
snake_case : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , snake_case__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : Dict = self.tool(*snake_case__ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : List[Any] = [outputs]
self.assertListEqual(output_types(snake_case__ ) , self.tool.outputs )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : Optional[Any] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
for output, output_type in zip(snake_case__ , self.tool.outputs ):
snake_case : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : str = []
for _input, input_type in zip(snake_case__ , self.tool.inputs ):
if isinstance(snake_case__ , snake_case__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Optional[int] = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : List[str] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
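# A hypothetical minimal tool that a harness like the one above could exercise.
# The `Tool` base class and the attribute names follow the transformers agents
# API, but this concrete class is an illustrative assumption:
from transformers import Tool

class UpperCaseTool(Tool):
    name = "upper_case"
    description = "This is a tool that upper-cases a piece of text."
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, text: str) -> str:
        return text.upper()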
| 10 | 0 |
from typing import Any
def UpperCamelCase ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : dict , __lowerCamelCase : dict , __lowerCamelCase : dict , ):
_validation(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
# Creates data structures and fill initial step
snake_case : dict = {}
snake_case : dict = {}
for state in states_space:
snake_case : int = observations_space[0]
snake_case : Optional[Any] = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
snake_case : List[str] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__lowerCamelCase ) ):
snake_case : Tuple = observations_space[o]
snake_case : str = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
snake_case : Union[str, Any] = ""
snake_case : Dict = -1
for k_state in states_space:
snake_case : Tuple = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
snake_case : Tuple = probability
snake_case : int = k_state
# Update probabilities and pointers dicts
snake_case : List[Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
snake_case : Any = arg_max
# The final observation
snake_case : Optional[int] = observations_space[len(__lowerCamelCase ) - 1]
# argmax for given final observation
snake_case : Optional[Any] = ""
snake_case : str = -1
for k_state in states_space:
snake_case : Tuple = probabilities[(k_state, final_observation)]
if probability > max_probability:
snake_case : List[str] = probability
snake_case : Union[str, Any] = k_state
snake_case : List[str] = arg_max
# Process pointers backwards
snake_case : Union[str, Any] = last_state
snake_case : Tuple = []
for o in range(len(__lowerCamelCase ) - 1 , -1 , -1 ):
result.append(__lowerCamelCase )
snake_case : Union[str, Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
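# Worked example (illustrative: the classic healthy/fever HMM, so the
# dictionaries below are assumptions rather than part of this module):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     initial = {"Healthy": 0.6, "Fever": 0.4}
#     transition = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emission = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#
# With these inputs the most likely state sequence returned by the Viterbi
# function above is ["Healthy", "Healthy", "Fever"].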
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Any , ):
_validate_not_empty(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
_validate_lists(__lowerCamelCase , __lowerCamelCase )
_validate_dicts(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any ):
_validate_list(__lowerCamelCase , "observations_space" )
_validate_list(__lowerCamelCase , "states_space" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : str ):
if not isinstance(_object , __lowerCamelCase ):
snake_case : Tuple = f"""{var_name} must be a list"""
raise ValueError(__lowerCamelCase )
else:
for x in _object:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
snake_case : int = f"""{var_name} must be a list of strings"""
raise ValueError(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Any , ):
_validate_dict(__lowerCamelCase , "initial_probabilities" , __lowerCamelCase )
_validate_nested_dict(__lowerCamelCase , "transition_probabilities" )
_validate_nested_dict(__lowerCamelCase , "emission_probabilities" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : str ):
_validate_dict(_object , __lowerCamelCase , __lowerCamelCase )
for x in _object.values():
_validate_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : type , __lowerCamelCase : bool = False ):
if not isinstance(_object , __lowerCamelCase ):
snake_case : List[str] = f"""{var_name} must be a dict"""
raise ValueError(__lowerCamelCase )
if not all(isinstance(__lowerCamelCase , __lowerCamelCase ) for x in _object ):
snake_case : Optional[Any] = f"""{var_name} all keys must be strings"""
raise ValueError(__lowerCamelCase )
if not all(isinstance(__lowerCamelCase , __lowerCamelCase ) for x in _object.values() ):
snake_case : Union[str, Any] = "nested dictionary " if nested else ""
snake_case : Any = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__lowerCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 355 |
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase_a : str ):
    if len(__lowerCamelCase ) != len(__lowerCamelCase_a ):
        raise ValueError("String lengths must match!" )
    count : int = 0
    for char_a, char_b in zip(__lowerCamelCase , __lowerCamelCase_a ):
        if char_a != char_b:
            count += 1
return count
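# Quick usage illustration (classic example: "karolin" and "kathrin" differ at
# three aligned positions):
#
#     >>> UpperCamelCase("karolin", "kathrin")
#     3
#     >>> UpperCamelCase("abc", "abc")
#     0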
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : int = 50 ):
    # Counts the ways a row of `length` units can be filled with blocks of
    # minimum length 3, where any two blocks are separated by at least one
    # empty unit (Project Euler-style block counting).
    snake_case : Tuple = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                # place a block of `block_length` at `block_start`, leave one
                # empty separator unit, and fill the remainder in every
                # previously counted way
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # plus the single arrangement where the block ends flush with the row
            ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }')
| 356 |
def UpperCamelCase ( __lowerCamelCase : int ):
    # Returns the largest number obtainable by removing exactly one digit.
    if not isinstance(__lowerCamelCase , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_string : str = str(abs(__lowerCamelCase ) )
        num_transpositions : list = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 10 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__lowerCamelCase = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def UpperCamelCase ( __lowerCamelCase : Optional[int] ):
snake_case : Union[str, Any] = {}
state_dict.pop("pixel_mean" , __lowerCamelCase )
state_dict.pop("pixel_std" , __lowerCamelCase )
snake_case : Union[str, Any] = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case : List[str] = key.replace(__lowerCamelCase , __lowerCamelCase )
if re.match(__lowerCamelCase , __lowerCamelCase ):
snake_case : Dict = int(re.match(__lowerCamelCase , __lowerCamelCase ).group(2 ) )
if layer_nb == 0:
snake_case : Tuple = key.replace("layers.0" , "proj_in" )
elif layer_nb == 1:
snake_case : Optional[Any] = key.replace("layers.1" , "layers.0" )
elif layer_nb == 2:
snake_case : str = key.replace("layers.2" , "proj_out" )
snake_case : Dict = value
snake_case : List[str] = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : str="ybelkada/segment-anything" ):
snake_case : List[str] = hf_hub_download(__lowerCamelCase , f"""checkpoints/{model_name}.pth""" )
if "sam_vit_b" in model_name:
snake_case : Optional[int] = SamConfig()
elif "sam_vit_l" in model_name:
snake_case : Optional[int] = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
snake_case : str = SamConfig(
vision_config=__lowerCamelCase , )
elif "sam_vit_h" in model_name:
snake_case : int = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
snake_case : Dict = SamConfig(
vision_config=__lowerCamelCase , )
snake_case : Optional[int] = torch.load(__lowerCamelCase , map_location="cpu" )
snake_case : Union[str, Any] = replace_keys(__lowerCamelCase )
snake_case : Optional[int] = SamImageProcessor()
snake_case : List[Any] = SamProcessor(image_processor=__lowerCamelCase )
snake_case : List[Any] = SamModel(__lowerCamelCase )
hf_model.load_state_dict(__lowerCamelCase )
snake_case : Any = hf_model.to("cuda" )
snake_case : Optional[int] = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
snake_case : str = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("RGB" )
snake_case : Tuple = [[[400, 650]]]
snake_case : List[Any] = [[1]]
snake_case : Optional[int] = processor(images=np.array(__lowerCamelCase ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case : Dict = hf_model(**__lowerCamelCase )
snake_case : Tuple = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
snake_case : List[Any] = processor(
images=np.array(__lowerCamelCase ) , input_points=__lowerCamelCase , input_labels=__lowerCamelCase , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case : Union[str, Any] = hf_model(**__lowerCamelCase )
snake_case : Dict = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
snake_case : Tuple = ((75, 275, 1725, 850),)
snake_case : str = processor(images=np.array(__lowerCamelCase ) , input_boxes=__lowerCamelCase , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case : Dict = hf_model(**__lowerCamelCase )
snake_case : Union[str, Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
snake_case : Optional[Any] = [[[400, 650], [800, 650]]]
snake_case : Tuple = [[1, 1]]
snake_case : Optional[int] = processor(
images=np.array(__lowerCamelCase ) , input_points=__lowerCamelCase , input_labels=__lowerCamelCase , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case : Optional[int] = hf_model(**__lowerCamelCase )
snake_case : Optional[int] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
__lowerCamelCase = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
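# Typical invocation of this conversion script (the script filename is
# illustrative; the flags match the argparse definitions above):
#
#   python convert_sam_original_to_hf_format.py \
#       --model_name sam_vit_h_4b8939 \
#       --pytorch_dump_folder_path ./sam-vit-huge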
| 357 |
import requests
from bs4 import BeautifulSoup
def UpperCamelCase ( __lowerCamelCase : str = "AAPL" ):
snake_case : List[Any] = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
snake_case : Tuple = BeautifulSoup(requests.get(__lowerCamelCase ).text , "html.parser" )
snake_case : Dict = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 10 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__lowerCamelCase = HfArgumentParser(InitializationArguments)
__lowerCamelCase = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowerCamelCase = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
__lowerCamelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__lowerCamelCase = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
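# Illustrative invocation (flags correspond to the InitializationArguments
# fields used above; the script filename and values are assumptions):
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model \
#       --push_to_hub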
| 358 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
__lowerCamelCase = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = f"""facebook/wmt19-{pair}"""
snake_case : Optional[Any] = self.get_tokenizer(snake_case__ )
snake_case : Dict = self.get_model(snake_case__ )
snake_case : List[Any] = bleu_data[pair]["src"]
snake_case : int = bleu_data[pair]["tgt"]
snake_case : Union[str, Any] = tokenizer(snake_case__ , return_tensors="pt" , truncation=snake_case__ , padding="longest" ).to(snake_case__ )
snake_case : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
snake_case : Optional[int] = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
snake_case : Optional[int] = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["bleu"] , snake_case__ )
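# `calculate_bleu` above comes from the examples' utils module; a minimal
# equivalent sketch on top of sacrebleu (an assumed dependency) would be:
import sacrebleu

def calculate_bleu_sketch(output_lines, reference_lines):
    # corpus_bleu takes the hypotheses plus a list of reference streams
    return {"bleu": round(sacrebleu.corpus_bleu(output_lines, [reference_lines]).score, 4)}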
| 10 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = dataset
snake_case : Optional[int] = process
snake_case : List[str] = params
def __len__(self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__(self : Optional[int] , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = self.dataset[i]
snake_case : List[str] = self.process(snake_case__ , **self.params )
return processed
class UpperCAmelCase ( A_ ):
def __init__(self : str , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[Any]=None ) -> int:
'''simple docstring'''
snake_case : Tuple = loader
snake_case : Union[str, Any] = infer
snake_case : Union[str, Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
snake_case : Tuple = None
snake_case : Optional[Any] = loader_batch_size
# Internal bookkeeping
snake_case : List[str] = None
snake_case : Any = None
def __len__(self : int ) -> Optional[Any]:
'''simple docstring'''
return len(self.loader )
def __iter__(self : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = iter(self.loader )
return self
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Tuple:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
snake_case : Union[str, Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
snake_case : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case__ , snake_case__ ):
# Convert ModelOutput to tuple first
snake_case : Optional[Any] = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
snake_case : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case : List[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case__ , snake_case__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
snake_case : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case : Any = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
snake_case : Optional[Any] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
snake_case : List[str] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
snake_case : int = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
snake_case : Optional[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
snake_case : str = self._loader_batch_data.__class__(snake_case__ )
self._loader_batch_index += 1
return result
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[Any]:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
snake_case : Union[str, Any] = next(self.iterator )
snake_case : Tuple = self.infer(snake_case__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case__ , torch.Tensor ):
snake_case : Optional[Any] = processed
else:
snake_case : Union[str, Any] = list(processed.keys() )[0]
snake_case : Dict = processed[key]
if isinstance(snake_case__ , snake_case__ ):
snake_case : Dict = len(snake_case__ )
else:
snake_case : Optional[int] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
snake_case : List[str] = observed_batch_size
# Setting internal index to unwrap the batch
snake_case : Tuple = processed
snake_case : Union[str, Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class UpperCAmelCase ( A_ ):
def __init__(self : Tuple , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Any=None ) -> str:
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ , snake_case__ )
def __iter__(self : str ) -> List[str]:
'''simple docstring'''
snake_case : str = iter(self.loader )
snake_case : Any = None
return self
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.subiterator is None:
snake_case : Any = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
snake_case : List[str] = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it is that we're basically flattening lists of lists
            # into a single list, but with generators
snake_case : str = self.infer(next(self.iterator ) , **self.params )
snake_case : List[str] = next(self.subiterator )
return processed
class UpperCAmelCase ( A_ ):
def __iter__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = iter(self.loader )
return self
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = False
snake_case : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
snake_case : str = self.loader_batch_item()
snake_case : Tuple = item.pop("is_last" )
accumulator.append(snake_case__ )
if is_last:
return accumulator
while not is_last:
snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(snake_case__ , torch.Tensor ):
snake_case : Dict = processed
else:
snake_case : Tuple = list(processed.keys() )[0]
snake_case : List[Any] = processed[key]
if isinstance(snake_case__ , snake_case__ ):
snake_case : int = len(snake_case__ )
else:
snake_case : Union[str, Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
snake_case : Any = observed_batch_size
snake_case : List[Any] = processed
snake_case : Dict = 0
while self._loader_batch_index < self.loader_batch_size:
snake_case : Union[str, Any] = self.loader_batch_item()
snake_case : Dict = item.pop("is_last" )
accumulator.append(snake_case__ )
if is_last:
return accumulator
else:
snake_case : List[str] = processed
snake_case : Union[str, Any] = item.pop("is_last" )
accumulator.append(snake_case__ )
return accumulator
class UpperCAmelCase ( A_ ):
def __init__(self : int , snake_case__ : Dataset , snake_case__ : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = dataset
snake_case : Optional[Any] = key
def __len__(self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__(self : List[str] , snake_case__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return self.dataset[i][self.key]
class UpperCAmelCase ( A_ ):
def __init__(self : Union[str, Any] , snake_case__ : Dataset , snake_case__ : str , snake_case__ : str ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = dataset
snake_case : Tuple = keya
snake_case : Union[str, Any] = keya
def __len__(self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__(self : Union[str, Any] , snake_case__ : Optional[int] ) -> str:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 359 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCamelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
snake_case : Union[str, Any] = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ , repo_id="test-config" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : Any = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
snake_case : Optional[int] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case__ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
CustomConfig.register_for_auto_class()
snake_case : Union[str, Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
snake_case : int = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
snake_case : Tuple = c.n_embd + 1 # int
snake_case : str = c.resid_pdrop + 1.0 # float
snake_case : Optional[Any] = not c.scale_attn_weights # bool
snake_case : Optional[int] = c.summary_type + "foo" # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(snake_case__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case__ , c.summary_type , "mismatch for key: summary_type" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = PretrainedConfig()
snake_case : List[str] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
snake_case__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
snake_case : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(snake_case__ , snake_case__ )]
if len(snake_case__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f""" {', '.join(snake_case__ )}.""" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = mock.Mock()
snake_case : Optional[int] = 5_00
snake_case : Any = {}
snake_case : str = HTTPError
snake_case : Tuple = {}
# Download this model to make sure it's in the cache.
snake_case : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
snake_case : List[str] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # This checks that we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
snake_case : Optional[Any] = AutoConfig.from_pretrained("bert-base-cased" )
snake_case : int = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case__ )
snake_case : str = 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
snake_case : str = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
snake_case : List[str] = ["config.42.0.0.json"]
snake_case : Optional[int] = 7_68
configuration.save_pretrained(snake_case__ )
shutil.move(os.path.join(snake_case__ , "config.4.0.0.json" ) , os.path.join(snake_case__ , "config.42.0.0.json" ) )
snake_case : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
snake_case : Optional[int] = "v4.0.0"
snake_case , snake_case : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case__ , return_unused_kwargs=snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(snake_case__ , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
snake_case : int = "v3.0.0"
snake_case : int = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 10 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__lowerCamelCase = None
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
__lowerCamelCase = {
"""google/pegasus-xsum""": 5_12,
}
class UpperCAmelCase ( A_ ):
A__ : Optional[int] = VOCAB_FILES_NAMES
A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Union[str, Any] = PegasusTokenizer
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : Dict , snake_case__ : Dict=None , snake_case__ : int=None , snake_case__ : Dict="<pad>" , snake_case__ : List[str]="</s>" , snake_case__ : str="<unk>" , snake_case__ : Union[str, Any]="<mask_2>" , snake_case__ : int="<mask_1>" , snake_case__ : Dict=None , snake_case__ : List[Any]=1_03 , **snake_case__ : Any , ) -> Any:
'''simple docstring'''
snake_case : Any = offset
if additional_special_tokens is not None:
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(snake_case__ )}, but is"""
f""" {type(snake_case__ )}""" )
snake_case : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(snake_case__ ) , self.offset - 1 )
]
if len(set(snake_case__ ) ) != len(snake_case__ ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
snake_case : Union[str, Any] = additional_special_tokens_extended
else:
snake_case : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , pad_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , mask_token=snake_case__ , mask_token_sent=snake_case__ , offset=snake_case__ , additional_special_tokens=snake_case__ , **snake_case__ , )
snake_case : Union[str, Any] = vocab_file
snake_case : Optional[int] = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List , snake_case__ : Optional[List] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(snake_case__ )
elif token_ids_a is None:
return self._special_token_mask(snake_case__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any]=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : str = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
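# Hedged note on the constructor above: when `additional_special_tokens` is given,
# it is padded with "<unk_{i}>" placeholders so the special-token block stays
# contiguous up to `offset` (103 by default); otherwise "<unk_2>".."<unk_{offset-1}>"
# are generated from scratch alongside the optional sentence-mask token.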
| 360 |
import os
import string
import sys
__lowerCamelCase = 1 << 8
__lowerCamelCase = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
__lowerCamelCase = KEYMAP["""up"""]
__lowerCamelCase = KEYMAP["""left"""]
if sys.platform == "win32":
__lowerCamelCase = []
__lowerCamelCase = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
__lowerCamelCase = ord(str(i))
def UpperCamelCase ( ):
if os.name == "nt":
import msvcrt
snake_case : str = "mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__lowerCamelCase ) == 0:
# Read the keystroke
snake_case : Optional[int] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
snake_case : Any = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
snake_case : int = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(__lowerCamelCase )
if ord(__lowerCamelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
snake_case : List[str] = chr(KEYMAP["esc"] )
except KeyError:
snake_case : Optional[Any] = cha[1]
else:
snake_case : Any = ch.decode(__lowerCamelCase )
else:
snake_case : Optional[Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
snake_case : Union[str, Any] = sys.stdin.fileno()
snake_case : Optional[Any] = termios.tcgetattr(__lowerCamelCase )
try:
tty.setraw(__lowerCamelCase )
snake_case : Union[str, Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(__lowerCamelCase , termios.TCSADRAIN , __lowerCamelCase )
return ch
def UpperCamelCase ( ):
snake_case : int = get_raw_chars()
if ord(__lowerCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__lowerCamelCase ) == KEYMAP["esc"]:
snake_case : Dict = get_raw_chars()
if ord(__lowerCamelCase ) == KEYMAP["mod_int"]:
snake_case : Any = get_raw_chars()
if ord(__lowerCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__lowerCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__lowerCamelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
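# Usage sketch (hedged): get_character() returns the pressed key as a string;
# arrow keys come back with ARROW_KEY_FLAG (1 << 8) added to their code so they
# cannot collide with plain character codes, and unrecognised escape sequences
# fall back to KEYMAP["undefined"].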
| 10 | 0 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
inputs.append(create_inputs(__lowerCamelCase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def UpperCamelCase ( __lowerCamelCase : List ):
snake_case : List[str] = []
for output in outputs:
if isinstance(__lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class UpperCAmelCase :
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
snake_case : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , snake_case__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : Dict = self.tool(*snake_case__ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : List[Any] = [outputs]
self.assertListEqual(output_types(snake_case__ ) , self.tool.outputs )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : Optional[Any] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
for output, output_type in zip(snake_case__ , self.tool.outputs ):
snake_case : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : str = []
for _input, input_type in zip(snake_case__ , self.tool.inputs ):
if isinstance(snake_case__ , snake_case__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Optional[int] = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : List[str] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
| 361 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
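# Hedged note: _LazyModule defers the actual torch/vision imports until an
# attribute from _import_structure is first accessed, so merely importing this
# package stays cheap and still works when the optional backends are missing.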
| 10 | 0 |
from __future__ import annotations
def UpperCamelCase ( __lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
snake_case : Optional[int] = []
snake_case , snake_case : int = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
snake_case : int = result + left + right
return input_list
def UpperCamelCase ( __lowerCamelCase : list ):
"""simple docstring"""
if len(__lowerCamelCase ) <= 1:
return input_list
snake_case : Optional[int] = list(__lowerCamelCase )
# iteration for two-way merging
snake_case : Optional[Any] = 2
while p <= len(__lowerCamelCase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ):
snake_case : int = i
snake_case : Optional[Any] = i + p - 1
snake_case : Any = (low + high + 1) // 2
snake_case : List[str] = merge(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# final merge of last two parts
if p * 2 >= len(__lowerCamelCase ):
snake_case : List[str] = i
snake_case : Optional[Any] = merge(__lowerCamelCase , 0 , __lowerCamelCase , len(__lowerCamelCase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
__lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
__lowerCamelCase = []
else:
__lowerCamelCase = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
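# Worked example (hedged sketch): for [5, 2, 9, 1] the two-way merge passes give
#   p=2: [2, 5, 9, 1] -> [2, 5, 1, 9]
#   final merge: [1, 2, 5, 9]
# so iter_merge_sort([5, 2, 9, 1]) is expected to return [1, 2, 5, 9].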
| 362 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , *snake_case__ : List[str] , **snake_case__ : Dict ) -> None:
'''simple docstring'''
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
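# Hedged note: the subclass adds no behaviour of its own; it keeps the old
# PerceiverFeatureExtractor name importable while delegating everything to
# PerceiverImageProcessor and emitting the deprecation warning above on
# instantiation.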
| 10 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase ( A_ ):
A__ : str = "new-model"
if is_tf_available():
class UpperCAmelCase ( A_ ):
A__ : int = NewModelConfig
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = "bert-base-cased"
snake_case : Any = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : Optional[int] = TFAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case : Optional[int] = "bert-base-cased"
snake_case : Tuple = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : List[str] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(snake_case__ )
snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(snake_case__ , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : str ) -> Any:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Optional[Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : str ) -> Any:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Any = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(snake_case__ )
snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(snake_case__ , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Dict = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case__ )
snake_case : Any = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case__ , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
snake_case : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : Dict = TFAutoModelForSequenceClassification.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
snake_case : Optional[int] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
@require_tensorflow_probability
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Tuple:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
snake_case : List[Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(snake_case__ )
snake_case : Any = TFAutoModelForTableQuestionAnswering.from_pretrained(
snake_case__ , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int:
'''simple docstring'''
snake_case : List[Any] = TFAutoModelWithLMHead.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) , 1_44_10 )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = TFAutoModelWithLMHead.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) , 1_44_10 )
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[int] = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case : int = copy.deepcopy(model.config )
snake_case : Tuple = ["FunnelBaseModel"]
snake_case : int = TFAutoModel.from_config(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case__ )
snake_case : Tuple = TFAutoModel.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
try:
AutoConfig.register("new-model" , snake_case__ )
snake_case : List[Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(snake_case__ ):
auto_class.register(snake_case__ , snake_case__ )
auto_class.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
auto_class.register(snake_case__ , snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case : List[Any] = BertModelTester(self ).get_config()
snake_case : Dict = NewModelConfig(**tiny_config.to_dict() )
snake_case : Optional[Any] = auto_class.from_config(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case__ )
snake_case : Optional[int] = auto_class.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _SCREAMING_SNAKE_CASE (self : Any ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , "bert-base is not a local folder and is not a valid model identifier" ):
snake_case : Optional[Any] = TFAutoModel.from_pretrained("bert-base" )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
snake_case : Optional[int] = TFAutoModel.from_pretrained(snake_case__ , revision="aaaaaa" )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
snake_case : Optional[Any] = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(snake_case__ , "Use `from_pt=True` to load this model" ):
snake_case : Tuple = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Tuple:
'''simple docstring'''
snake_case : Optional[Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
snake_case : Dict = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
snake_case : Optional[int] = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
snake_case : Tuple = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 0 |
import re
def UpperCamelCase ( __lowerCamelCase : str ):
if len(re.findall("[ATCG]" , __lowerCamelCase ) ) != len(__lowerCamelCase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
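# Example (hedged sketch): the translation table swaps A<->T and C<->G, so
# "ATCG" maps to its complementary strand "TAGC"; any character outside ATCG
# raises ValueError("Invalid Strand").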
| 364 |
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Union[str, Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
snake_case : Tuple = ""
snake_case : Optional[int] = ""
# append each character + "|" to new_input_string for range(0, length-1)
for i in input_string[: len(__lowerCamelCase ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previous furthest-ending palindromic
# substring
snake_case , snake_case : Tuple = 0, 0
# length[i] shows the length of palindromic substring with center i
snake_case : Any = [1 for i in range(len(__lowerCamelCase ) )]
# for each character in new_input_string find the corresponding palindromic string
snake_case : int = 0
for j in range(len(__lowerCamelCase ) ):
snake_case : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__lowerCamelCase )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
snake_case : str = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
snake_case : List[str] = j - k + 1 # noqa: E741
snake_case : Dict = j + k - 1
# update max_length and start position
if max_length < length[j]:
snake_case : Optional[Any] = length[j]
snake_case : int = j
# create that string
snake_case : Any = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
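# Worked example (hedged sketch): for "babad" the expanded string is "b|a|b|a|d";
# the widest radius centres on the middle "b", covering the span "|a|b|a|", so
# the function returns "aba" (one of the two equally long answers, "aba" and "bab").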
| 10 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
snake_case : Dict = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
snake_case : Any = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
snake_case : int = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
snake_case : int = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
snake_case : Dict = model(snake_case__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1e-3 ) )
@slow
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : Optional[Any] = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
snake_case : Union[str, Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
snake_case : Tuple = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
snake_case : Tuple = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
snake_case : int = model(snake_case__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1e-3 ) )
| 365 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__lowerCamelCase = Mapping[str, np.ndarray]
__lowerCamelCase = Mapping[str, Any] # Is a nested dict.
__lowerCamelCase = 0.01
@dataclasses.dataclass(frozen=A_ )
class UpperCAmelCase :
A__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
A__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
A__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
A__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
A__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
A__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
A__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
A__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
A__ : Optional[Sequence[int]] = None
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Dict = r"(\[[A-Z]+\]\n)"
snake_case : List[str] = [tag.strip() for tag in re.split(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) > 0]
snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
snake_case : List[str] = ["N", "CA", "C"]
snake_case : str = None
snake_case : str = None
snake_case : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
snake_case : Optional[Any] = "X" # FIXME: strings are immutable
snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(__lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowerCamelCase , g[1][axis].split() ) ) )
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
snake_case : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
snake_case : List[str] = np.zeros(
(
len(__lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCamelCase , atom_mask=__lowerCamelCase , aatype=__lowerCamelCase , residue_index=np.arange(len(__lowerCamelCase ) ) , b_factors=__lowerCamelCase , )
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : int = 0 ):
snake_case : List[str] = []
snake_case : str = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
snake_case : Union[str, Any] = prot.parents
snake_case : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
snake_case : Tuple = [p for i, p in zip(__lowerCamelCase , __lowerCamelCase ) if i == chain_id]
if parents is None or len(__lowerCamelCase ) == 0:
snake_case : int = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(__lowerCamelCase )}""" )
return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowerCamelCase ) , [] )
parent_dict[str(__lowerCamelCase )].append(__lowerCamelCase )
snake_case : List[str] = max([int(__lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
snake_case : Optional[Any] = parent_dict.get(str(__lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(__lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(__lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCamelCase ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_atoa(__lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.intaa )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
pdb_lines.extend(__lowerCamelCase )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(__lowerCamelCase ):
snake_case : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(__lowerCamelCase ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , __lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( __lowerCamelCase : FeatureDict , __lowerCamelCase : ModelOutput , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Sequence[str]] = None , __lowerCamelCase : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowerCamelCase , remark=__lowerCamelCase , parents=__lowerCamelCase , parents_chain_index=__lowerCamelCase , )
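# Summary sketch (hedged): the helpers above parse a ProteinNet text record
# ([PRIMARY]/[TERTIARY]/[MASK] sections) into a Protein dataclass and serialize
# a Protein back to PDB text; PICO_TO_ANGSTROM rescales ProteinNet picometre
# coordinates to angstroms (1 pm = 0.01 A).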
| 10 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """Hello, World!"""
__lowerCamelCase = """en_XX"""
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : bool ):
snake_case : str = Path("data_bin" )
snake_case : str = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__lowerCamelCase ).parent ) , checkpoint_file=Path(__lowerCamelCase ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(__lowerCamelCase ) , bpe="sentencepiece" , sentencepiece_model=str(Path(__lowerCamelCase ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(__lowerCamelCase )
snake_case : Tuple = xmod.model.encoder.sentence_encoder
snake_case : Tuple = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
snake_case : Dict = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , __lowerCamelCase )
snake_case : Optional[Any] = XmodForSequenceClassification(__lowerCamelCase ) if classification_head else XmodForMaskedLM(__lowerCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case : List[str] = xmod_sent_encoder.embed_tokens.weight
snake_case : int = xmod_sent_encoder.embed_positions.weight
snake_case : Tuple = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
snake_case : int = xmod_sent_encoder.layernorm_embedding.weight
snake_case : Optional[Any] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case : Union[str, Any] = model.roberta.encoder.layer[i]
snake_case : Optional[int] = xmod_sent_encoder.layers[i]
# self attention
snake_case : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
snake_case : Any = xmod_layer.self_attn.q_proj.weight
snake_case : Optional[Any] = xmod_layer.self_attn.q_proj.bias
snake_case : int = xmod_layer.self_attn.k_proj.weight
snake_case : Optional[Any] = xmod_layer.self_attn.k_proj.bias
snake_case : Optional[int] = xmod_layer.self_attn.v_proj.weight
snake_case : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
snake_case : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
snake_case : Tuple = xmod_layer.self_attn.out_proj.weight
snake_case : Dict = xmod_layer.self_attn.out_proj.bias
snake_case : Any = xmod_layer.self_attn_layer_norm.weight
snake_case : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
snake_case : int = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
snake_case : List[Any] = xmod_layer.fca.weight
snake_case : List[str] = xmod_layer.fca.bias
# output
snake_case : str = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
snake_case : Dict = xmod_layer.fca.weight
snake_case : List[Any] = xmod_layer.fca.bias
snake_case : Optional[int] = xmod_layer.final_layer_norm.weight
snake_case : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
snake_case : int = xmod_layer.adapter_layer_norm.weight
snake_case : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
snake_case : str = bert_output.adapter_modules[lang_code]
snake_case : Union[str, Any] = xmod_layer.adapter_modules[lang_code]
snake_case : str = from_adapter.fca.weight
snake_case : Optional[Any] = from_adapter.fca.bias
snake_case : Optional[int] = from_adapter.fca.weight
snake_case : List[str] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
snake_case : int = xmod_sent_encoder.layer_norm.weight
snake_case : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
snake_case : Any = xmod.model.classification_heads["mnli"].dense.weight
snake_case : Optional[Any] = xmod.model.classification_heads["mnli"].dense.bias
snake_case : Optional[Any] = xmod.model.classification_heads["mnli"].out_proj.weight
snake_case : Dict = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case : str = xmod.model.encoder.lm_head.dense.weight
snake_case : Any = xmod.model.encoder.lm_head.dense.bias
snake_case : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
snake_case : str = xmod.model.encoder.lm_head.layer_norm.bias
snake_case : List[str] = xmod.model.encoder.lm_head.weight
snake_case : Optional[int] = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case : Tuple = xmod.encode(__lowerCamelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__lowerCamelCase )
snake_case : Optional[int] = model(__lowerCamelCase )[0]
if classification_head:
snake_case : Tuple = xmod.model.classification_heads["mnli"](xmod.extract_features(__lowerCamelCase ) )
else:
snake_case : Optional[Any] = xmod.model(__lowerCamelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
snake_case : Optional[int] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
snake_case : int = torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(__lowerCamelCase ).mkdir(parents=__lowerCamelCase , exist_ok=__lowerCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
__lowerCamelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
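# Hypothetical invocation (script and file names are assumptions):
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path ./model.pt \
#       --pytorch_dump_folder_path ./xmod-base-converted
# Add --classification_head when the fairseq checkpoint carries an MNLI head.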
| 366 |
from __future__ import annotations
__lowerCamelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCAmelCase :
def __init__(self : Tuple , snake_case__ : dict[str, list[str]] , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : str = graph
# mapping node to its parent in resulting breadth first tree
snake_case : dict[str, str | None] = {}
snake_case : Union[str, Any] = source_vertex
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> None:
'''simple docstring'''
snake_case : Any = {self.source_vertex}
snake_case : str = None
snake_case : List[str] = [self.source_vertex] # first in first out queue
while queue:
snake_case : List[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case__ )
snake_case : Any = vertex
queue.append(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str ) -> str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
snake_case : str = self.parent.get(snake_case__ )
if target_vertex_parent is None:
snake_case : Optional[Any] = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(snake_case__ )
return self.shortest_path(snake_case__ ) + f"""->{target_vertex}"""
if __name__ == "__main__":
__lowerCamelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
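# Expected output (hedged): "G->C->A->B->D" and "G" for the first two calls;
# the final call raises ValueError because "Foo" was never reached from the
# source vertex "G".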
| 10 | 0 |
import os
def UpperCamelCase ( __lowerCamelCase : int ):
snake_case : List[str] = len(grid[0] )
snake_case : List[str] = len(__lowerCamelCase )
snake_case : Dict = 0
snake_case : Any = 0
snake_case : Optional[int] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(__lowerCamelCase ):
for j in range(n_rows - 3 ):
snake_case : Any = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
snake_case : Any = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
snake_case : str = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal (/) product
if i > 2:
snake_case : str = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
snake_case : Dict = max(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if max_product > largest:
snake_case : Optional[Any] = max_product
return largest
def UpperCamelCase ( ):
snake_case : Any = []
with open(os.path.dirname(__lowerCamelCase ) + "/grid.txt" ) as file:
for line in file:
grid.append(line.strip("\n" ).split(" " ) )
snake_case : List[Any] = [[int(__lowerCamelCase ) for i in grid[j]] for j in range(len(__lowerCamelCase ) )]
return largest_product(__lowerCamelCase )
if __name__ == "__main__":
print(solution())
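# Usage note (assumption): solution() expects a whitespace-separated grid in a
# "grid.txt" file next to this module, in the style of Project Euler problem 11
# (greatest product of four adjacent numbers in a 20x20 grid).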
| 367 |
from __future__ import annotations
def UpperCamelCase ( __lowerCamelCase : list[int] ):
snake_case : Optional[int] = len(__lowerCamelCase ) // 2
# choose the middle 3 elements
snake_case : str = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
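# Worked example (hedged sketch): peak([1, 3, 4, 2]) inspects the middle slice
# [3, 4, 2], sees that 4 exceeds both neighbours, and returns 4; otherwise the
# search recurses on one half, for O(log n) comparisons overall.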
| 10 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class UpperCAmelCase ( A_ ):
A__ : str = "data2vec-vision"
def __init__(self : Optional[Any] , snake_case__ : Optional[int]=7_68 , snake_case__ : str=12 , snake_case__ : Union[str, Any]=12 , snake_case__ : Tuple=30_72 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Dict=0.0 , snake_case__ : Tuple=0.0 , snake_case__ : str=0.02 , snake_case__ : str=1e-12 , snake_case__ : List[str]=2_24 , snake_case__ : Dict=16 , snake_case__ : Union[str, Any]=3 , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=False , snake_case__ : str=False , snake_case__ : Any=False , snake_case__ : List[str]=0.1 , snake_case__ : Any=0.1 , snake_case__ : str=True , snake_case__ : Union[str, Any]=[3, 5, 7, 11] , snake_case__ : Tuple=[1, 2, 3, 6] , snake_case__ : Optional[int]=True , snake_case__ : List[str]=0.4 , snake_case__ : Any=2_56 , snake_case__ : Optional[int]=1 , snake_case__ : Dict=False , snake_case__ : List[Any]=2_55 , **snake_case__ : str , ) -> int:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : str = hidden_size
snake_case : int = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : Optional[int] = intermediate_size
snake_case : Optional[int] = hidden_act
snake_case : Any = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : Tuple = initializer_range
snake_case : str = layer_norm_eps
snake_case : int = image_size
snake_case : Optional[int] = patch_size
snake_case : Any = num_channels
snake_case : List[str] = use_mask_token
snake_case : int = use_absolute_position_embeddings
snake_case : Any = use_relative_position_bias
snake_case : Union[str, Any] = use_shared_relative_position_bias
snake_case : Optional[int] = layer_scale_init_value
snake_case : List[str] = drop_path_rate
snake_case : str = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case : Any = out_indices
snake_case : List[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case : int = use_auxiliary_head
snake_case : str = auxiliary_loss_weight
snake_case : List[Any] = auxiliary_channels
snake_case : Union[str, Any] = auxiliary_num_convs
snake_case : Union[str, Any] = auxiliary_concat_input
snake_case : Optional[int] = semantic_loss_ignore_index
class UpperCAmelCase ( A_ ):
A__ : List[str] = version.parse("1.11" )
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> float:
'''simple docstring'''
return 1e-4
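# Hedged note: the OnnxConfig subclass above declares a single "pixel_values"
# input with dynamic batch/channel/height/width axes and a 1e-4 absolute
# tolerance for validating exported outputs.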
| 368 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = """."""
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=A_ )
class UpperCAmelCase ( A_ ):
A__ : str = field(default="image-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
A__ : ClassVar[Features] = Features({"image": Image()} )
A__ : ClassVar[Features] = Features({"labels": ClassLabel} )
A__ : str = "image"
A__ : str = "labels"
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[int] ) -> Tuple:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
snake_case : List[str] = copy.deepcopy(self )
snake_case : Optional[Any] = self.label_schema.copy()
snake_case : Any = features[self.label_column]
snake_case : List[str] = label_schema
return task_template
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 369 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
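# Hedged usage sketch; the script filename and data paths are placeholders, and
# fire exposes the function's arguments as CLI flags:
#
#   python rouge_cli.py generated_summaries.txt reference_summaries.txt --save_path rouge.json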
| 10 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class DiffieHellman:
    """Finite-field Diffie-Hellman key exchange over the RFC 3526 MODP groups above."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        # 256-bit private exponent drawn from the OS CSPRNG
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # Range check plus a Legendre-symbol test against the safe prime
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
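# Minimal handshake sketch (an added example, not part of the original module):
# two parties on the same MODP group always derive identical shared-key digests.
#
#   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#   assert alice.generate_shared_key(bob.generate_public_key()) == \
#          bob.generate_shared_key(alice.generate_public_key())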
| 370 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
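# Hedged invocation sketch; the script filename and every path below are placeholders:
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/dict \
#       --pytorch_dump_folder_path ./unispeech-sat-hf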
| 10 | 0 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
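# Expected behaviour (hedged check, not captured output): 10 - x*x has its positive
# root at sqrt(10) ~= 3.162; both calls above bracket it, so each print shows a value
# near 3.16 (the loop stops once the bracket width drops below 0.01).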
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""}
__lowerCamelCase = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Dict = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
snake_case : Any = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
snake_case : List[Any] = token.rstrip("\n" )
snake_case : int = index
return vocab
class UpperCAmelCase ( A_ ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = ["input_ids", "attention_mask"]
def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None:
'''simple docstring'''
snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case : Dict = f"""[unused{i}]"""
snake_case : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case : Dict = 12
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__(self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.__dict__.copy()
snake_case : Tuple = None
return state
def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
snake_case : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
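# Hedged usage sketch, not part of the original file: the public class name is
# obfuscated above and assumed to be XLMProphetNetTokenizer; the vocab path is a
# placeholder.
#
#   tok = XLMProphetNetTokenizer("prophetnet.tokenizer")
#   tok("Hello world")["input_ids"][-1]  # -> 2, the [SEP] id appended as the only special token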
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCamelCase = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCamelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
__lowerCamelCase = {
"""facebook/xglm-564M""": 20_48,
}
class UpperCAmelCase ( A_ ):
A__ : Any = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : str , snake_case__ : Optional[Any] , snake_case__ : List[str]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Dict="</s>" , snake_case__ : Any="<s>" , snake_case__ : str="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Any , ) -> None:
'''simple docstring'''
snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case : Optional[int] = 7
snake_case : List[str] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case : Tuple = len(self.sp_model )
snake_case : Any = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case__ )
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = self.__dict__.copy()
snake_case : str = None
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Dict , snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case : Tuple = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ ))
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : List[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
| 10 | 0 |
def cramers_rule_2x2(equation_a: list[int], equation_b: list[int]):
    # Check if the input is valid: each equation is [a, b, c] for a*x + b*y = c
    if not len(equation_a) == len(equation_b) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation_a[0] == equation_a[1] == equation_b[0] == equation_b[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients
    a_one, b_one, c_one = equation_a
    a_two, b_two, c_two = equation_b
    # Calculate the determinants of the matrices
    determinant = a_one * b_two - a_two * b_one
    determinant_x = c_one * b_two - c_two * b_one
    determinant_y = a_one * c_two - a_two * c_one
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: the system is consistent with the unique solution x = y = 0
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial solution (consistent system)
            return (x, y)
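# Worked check (hedged sketch): x + 2y = 3 and 2x + y = 3 meet at (1, 1);
# determinant = 1*1 - 2*2 = -3 and determinant_x = determinant_y = -3, so x = y = 1.
#
#   cramers_rule_2x2([1, 2, 3], [2, 1, 3])  # -> (1.0, 1.0)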
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
A__ : int = ["pixel_values"]
def __init__(self : Tuple , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : int = 8 , **snake_case__ : Dict , ) -> None:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : int = do_rescale
snake_case : List[str] = rescale_factor
snake_case : Optional[Any] = do_pad
snake_case : Dict = pad_size
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : np.ndarray , snake_case__ : float , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[str] ) -> np.ndarray:
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : Optional[Union[str, ChannelDimension]] = None ) -> Dict:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = get_image_size(snake_case__ )
snake_case : str = (old_height // size + 1) * size - old_height
snake_case : List[str] = (old_width // size + 1) * size - old_width
return pad(snake_case__ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case__ : List[Any] , ) -> Tuple:
'''simple docstring'''
snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Optional[Any] = do_pad if do_pad is not None else self.do_pad
snake_case : Dict = pad_size if pad_size is not None else self.pad_size
snake_case : Union[str, Any] = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
snake_case : str = [to_numpy_array(snake_case__ ) for image in images]
if do_rescale:
snake_case : str = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_pad:
snake_case : List[Any] = [self.pad(snake_case__ , size=snake_case__ ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
snake_case : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
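# Hedged usage sketch: the class name is obfuscated above and assumed to be
# Swin2SRImageProcessor. Note the pad formula always pads up to the *next* multiple
# of pad_size, even when a side is already an exact multiple.
#
#   processor = Swin2SRImageProcessor(do_pad=True, pad_size=8)
#   batch = processor(images=image, return_tensors="pt")  # image: PIL / ndarray / tensor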
| 10 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__lowerCamelCase = Mapping[str, np.ndarray]
__lowerCamelCase = Mapping[str, Any] # Is a nested dict.
__lowerCamelCase = 0.01
@dataclasses.dataclass(frozen=A_ )
class UpperCAmelCase :
A__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
A__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
A__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
A__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
A__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
A__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
A__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
A__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
A__ : Optional[Sequence[int]] = None
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Dict = r"(\[[A-Z]+\]\n)"
snake_case : List[str] = [tag.strip() for tag in re.split(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) > 0]
snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
snake_case : List[str] = ["N", "CA", "C"]
snake_case : str = None
snake_case : str = None
snake_case : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
snake_case : Optional[Any] = "X" # FIXME: strings are immutable
snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(__lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowerCamelCase , g[1][axis].split() ) ) )
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
snake_case : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
snake_case : List[str] = np.zeros(
(
len(__lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCamelCase , atom_mask=__lowerCamelCase , aatype=__lowerCamelCase , residue_index=np.arange(len(__lowerCamelCase ) ) , b_factors=__lowerCamelCase , )
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : int = 0 ):
snake_case : List[str] = []
snake_case : str = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
snake_case : Union[str, Any] = prot.parents
snake_case : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
snake_case : Tuple = [p for i, p in zip(__lowerCamelCase , __lowerCamelCase ) if i == chain_id]
if parents is None or len(__lowerCamelCase ) == 0:
snake_case : int = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(__lowerCamelCase )}""" )
return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowerCamelCase ) , [] )
parent_dict[str(__lowerCamelCase )].append(__lowerCamelCase )
snake_case : List[str] = max([int(__lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
snake_case : Optional[Any] = parent_dict.get(str(__lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(__lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(__lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCamelCase ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_atoa(__lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.intaa )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
pdb_lines.extend(__lowerCamelCase )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(__lowerCamelCase ):
snake_case : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(__lowerCamelCase ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , __lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( __lowerCamelCase : FeatureDict , __lowerCamelCase : ModelOutput , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Sequence[str]] = None , __lowerCamelCase : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowerCamelCase , remark=__lowerCamelCase , parents=__lowerCamelCase , parents_chain_index=__lowerCamelCase , )
| 352 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__lowerCamelCase = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__lowerCamelCase = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__lowerCamelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__lowerCamelCase = out / out.max() * 2_55
__lowerCamelCase = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
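# Hedged cross-check, not in the original: OpenCV ships an equivalent builtin that
# can validate the hand-rolled kernel above (cv2 expects theta in radians, not degrees):
#
#   from cv2 import getGaborKernel
#   reference = getGaborKernel((11, 11), 8, 0, 10, 0, 0)  # ksize, sigma, theta, lambd, gamma, psi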
| 10 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    ]
    result = kruskal(num_nodes, edges)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
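# Note (hedged check): kruskal() may emit the MST edges in any order, hence the
# sorted() comparison; the minimum spanning tree weight here is 1+2+2+4+4+7+8+9 = 37.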
| 353 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
def __init__(self : Dict , snake_case__ : Any , snake_case__ : Tuple=99 , snake_case__ : Tuple=13 , snake_case__ : int=16 , snake_case__ : Tuple=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=2 , snake_case__ : List[Any]=32 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : int=30 , snake_case__ : int=0 , snake_case__ : Tuple=1 , snake_case__ : Optional[Any]=2 , snake_case__ : int=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : Any = decoder_seq_length
# For common tests
snake_case : Any = self.decoder_seq_length
snake_case : Optional[int] = is_training
snake_case : List[str] = use_attention_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : Any = d_model
snake_case : Dict = d_model
snake_case : List[str] = decoder_layers
snake_case : Union[str, Any] = decoder_layers
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_attention_heads
snake_case : Dict = decoder_attention_heads
snake_case : Optional[int] = eos_token_id
snake_case : Dict = bos_token_id
snake_case : List[str] = pad_token_id
snake_case : int = decoder_start_token_id
snake_case : List[Any] = use_cache
snake_case : List[str] = max_position_embeddings
snake_case : Dict = None
snake_case : Union[str, Any] = decoder_seq_length
snake_case : Union[str, Any] = 2
snake_case : Union[str, Any] = 1
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_attention_mask:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : List[Any] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval()
snake_case : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case : List[str] = model(snake_case__ , use_cache=snake_case__ )
snake_case : Any = model(snake_case__ )
snake_case : Any = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
snake_case : List[Any] = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
snake_case : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : str = model(snake_case__ )["last_hidden_state"]
snake_case : str = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"]
# select random slice
snake_case : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : str = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ )
snake_case : int = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[list[int]] ):
def update_area_of_max_square(__lowerCamelCase : int , __lowerCamelCase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case : List[Any] = update_area_of_max_square(__lowerCamelCase , col + 1 )
snake_case : int = update_area_of_max_square(row + 1 , col + 1 )
snake_case : Union[str, Any] = update_area_of_max_square(row + 1 , __lowerCamelCase )
if mat[row][col]:
snake_case : Optional[Any] = 1 + min([right, diagonal, down] )
snake_case : List[Any] = max(largest_square_area[0] , __lowerCamelCase )
return sub_problem_sol
else:
return 0
snake_case : Union[str, Any] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
__lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case : Optional[Any] = update_area_of_max_square_using_dp_array(__lowerCamelCase , col + 1 , __lowerCamelCase )
snake_case : str = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , __lowerCamelCase )
snake_case : Optional[int] = update_area_of_max_square_using_dp_array(row + 1 , __lowerCamelCase , __lowerCamelCase )
if mat[row][col]:
snake_case : Union[str, Any] = 1 + min([right, diagonal, down] )
snake_case : Dict = max(largest_square_area[0] , __lowerCamelCase )
snake_case : Optional[int] = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case : Any = [0]
snake_case : Optional[Any] = [[-1] * cols for _ in range(__lowerCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , __lowerCamelCase )
return largest_square_area[0]
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[list[int]] ):
snake_case : Optional[Any] = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case : Optional[int] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
snake_case : Optional[int] = dp_array[row][col + 1]
snake_case : Any = dp_array[row + 1][col + 1]
snake_case : Optional[int] = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case : Tuple = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
snake_case : Tuple = max(dp_array[row][col] , __lowerCamelCase )
else:
snake_case : int = 0
return largest_square_area
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[list[int]] ):
snake_case : Optional[Any] = [0] * (cols + 1)
snake_case : Any = [0] * (cols + 1)
snake_case : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
snake_case : Dict = current_row[col + 1]
snake_case : List[str] = next_row[col + 1]
snake_case : Optional[int] = next_row[col]
if mat[row][col] == 1:
snake_case : Optional[Any] = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
snake_case : List[str] = max(current_row[col] , __lowerCamelCase )
else:
snake_case : str = 0
snake_case : List[str] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 354 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
inputs.append(create_inputs(__lowerCamelCase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def UpperCamelCase ( __lowerCamelCase : List ):
snake_case : List[str] = []
for output in outputs:
if isinstance(__lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class UpperCAmelCase :
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
snake_case : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , snake_case__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : Dict = self.tool(*snake_case__ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : List[Any] = [outputs]
self.assertListEqual(output_types(snake_case__ ) , self.tool.outputs )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : Optional[Any] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
for output, output_type in zip(snake_case__ , self.tool.outputs ):
snake_case : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : str = []
for _input, input_type in zip(snake_case__ , self.tool.inputs ):
if isinstance(snake_case__ , snake_case__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Optional[int] = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : List[str] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
| 10 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase ( A_ ):
A__ : List[str] = ["image_processor"]
A__ : Union[str, Any] = "SamImageProcessor"
def __init__(self : Optional[int] , snake_case__ : Dict ) -> Optional[int]:
'''simple docstring'''
super().__init__(snake_case__ )
snake_case : Tuple = self.image_processor
snake_case : Optional[int] = -10
snake_case : Any = self.image_processor.size["longest_edge"]
def __call__(self : int , snake_case__ : str=None , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : int , ) -> BatchEncoding:
'''simple docstring'''
snake_case : str = self.image_processor(
snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# pop arguments that are not used in the foward but used nevertheless
snake_case : List[str] = encoding_image_processor["original_sizes"]
if hasattr(snake_case__ , "numpy" ): # Checks if Torch or TF tensor
snake_case : Dict = original_sizes.numpy()
snake_case : List[str] = self._check_and_preprocess_points(
input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , )
snake_case : Tuple = self._normalize_and_convert(
snake_case__ , snake_case__ , input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , return_tensors=snake_case__ , )
return encoding_image_processor
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : List[str]=None , snake_case__ : List[Any]="pt" , ) -> Any:
'''simple docstring'''
if input_points is not None:
if len(snake_case__ ) != len(snake_case__ ):
snake_case : Dict = [
self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0] ) for point in input_points
]
else:
snake_case : Union[str, Any] = [
self._normalize_coordinates(self.target_size , snake_case__ , snake_case__ )
for point, original_size in zip(snake_case__ , snake_case__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
snake_case : str = self._pad_points_and_labels(snake_case__ , snake_case__ )
snake_case : Union[str, Any] = np.array(snake_case__ )
if input_labels is not None:
snake_case : Tuple = np.array(snake_case__ )
if input_boxes is not None:
if len(snake_case__ ) != len(snake_case__ ):
snake_case : Optional[int] = [
self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0] , is_bounding_box=snake_case__ )
for box in input_boxes
]
else:
snake_case : Union[str, Any] = [
self._normalize_coordinates(self.target_size , snake_case__ , snake_case__ , is_bounding_box=snake_case__ )
for box, original_size in zip(snake_case__ , snake_case__ )
]
snake_case : Tuple = np.array(snake_case__ )
if input_boxes is not None:
if return_tensors == "pt":
snake_case : Any = torch.from_numpy(snake_case__ )
# boxes batch size of 1 by default
snake_case : str = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
snake_case : List[Any] = tf.convert_to_tensor(snake_case__ )
# boxes batch size of 1 by default
snake_case : List[str] = tf.expand_dims(snake_case__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
snake_case : List[Any] = torch.from_numpy(snake_case__ )
# point batch size of 1 by default
snake_case : Optional[Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
snake_case : Tuple = tf.convert_to_tensor(snake_case__ )
# point batch size of 1 by default
snake_case : List[Any] = tf.expand_dims(snake_case__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
snake_case : Optional[int] = torch.from_numpy(snake_case__ )
# point batch size of 1 by default
snake_case : Tuple = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
snake_case : int = tf.convert_to_tensor(snake_case__ )
# point batch size of 1 by default
snake_case : int = tf.expand_dims(snake_case__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = max([point.shape[0] for point in input_points] )
snake_case : Optional[int] = []
for i, point in enumerate(snake_case__ ):
if point.shape[0] != expected_nb_points:
snake_case : List[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
snake_case : Optional[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(snake_case__ )
snake_case : Any = processed_input_points
return input_points, input_labels
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : int , snake_case__ : np.ndarray , snake_case__ : Dict , snake_case__ : List[str]=False ) -> np.ndarray:
'''simple docstring'''
snake_case : Optional[int] = original_size
snake_case : Any = self.image_processor._get_preprocess_shape(snake_case__ , longest_edge=snake_case__ )
snake_case : Tuple = deepcopy(snake_case__ ).astype(snake_case__ )
if is_bounding_box:
snake_case : Any = coords.reshape(-1 , 2 , 2 )
snake_case : List[Any] = coords[..., 0] * (new_w / old_w)
snake_case : Tuple = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
snake_case : Tuple = coords.reshape(-1 , 4 )
return coords
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[str]=None , snake_case__ : List[Any]=None , snake_case__ : Any=None , ) -> Any:
'''simple docstring'''
if input_points is not None:
if hasattr(snake_case__ , "numpy" ): # Checks for TF or Torch tensor
snake_case : List[str] = input_points.numpy().tolist()
if not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_points[0] , snake_case__ ):
raise ValueError("Input points must be a list of list of floating points." )
snake_case : Dict = [np.array(snake_case__ ) for input_point in input_points]
else:
snake_case : Union[str, Any] = None
if input_labels is not None:
if hasattr(snake_case__ , "numpy" ):
snake_case : Dict = input_labels.numpy().tolist()
if not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_labels[0] , snake_case__ ):
raise ValueError("Input labels must be a list of list integers." )
snake_case : Optional[Any] = [np.array(snake_case__ ) for label in input_labels]
else:
snake_case : Optional[int] = None
if input_boxes is not None:
if hasattr(snake_case__ , "numpy" ):
snake_case : Optional[Any] = input_boxes.numpy().tolist()
if (
not isinstance(snake_case__ , snake_case__ )
or not isinstance(input_boxes[0] , snake_case__ )
or not isinstance(input_boxes[0][0] , snake_case__ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
snake_case : Any = [np.array(snake_case__ ).astype(np.floataa ) for box in input_boxes]
else:
snake_case : Tuple = None
return input_points, input_labels, input_boxes
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : List[Any] , *snake_case__ : str , **snake_case__ : str ) -> int:
'''simple docstring'''
return self.image_processor.post_process_masks(*snake_case__ , **snake_case__ )
| 355 |
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str ):
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError("String lengths must match!" )
snake_case : Optional[Any] = 0
for chara, chara in zip(__lowerCamelCase , __lowerCamelCase ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
from __future__ import annotations
__lowerCamelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCamelCase ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : list[int] , __lowerCamelCase : int , __lowerCamelCase : list[list[int]] , ):
snake_case : List[str] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) )
] # the reference grid
snake_case : Union[str, Any] = 1
snake_case : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) )
] # the action grid
snake_case : Union[str, Any] = init[0]
snake_case : Dict = init[1]
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
snake_case : Union[str, Any] = [[f, g, x, y]]
snake_case : Dict = False # flag that is set when search is complete
snake_case : int = False # flag set if we can't find expand
while not found and not resign:
if len(__lowerCamelCase ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
snake_case : List[str] = cell.pop()
snake_case : Union[str, Any] = next_cell[2]
snake_case : int = next_cell[3]
snake_case : Union[str, Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
snake_case : List[Any] = True
else:
for i in range(len(__lowerCamelCase ) ): # to try out different valid actions
snake_case : int = x + DIRECTIONS[i][0]
snake_case : Optional[int] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__lowerCamelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
snake_case : str = g + cost
snake_case : Any = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
snake_case : Union[str, Any] = 1
snake_case : str = i
snake_case : Dict = []
snake_case : Optional[Any] = goal[0]
snake_case : Dict = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
snake_case : List[Any] = x - DIRECTIONS[action[x][y]][0]
snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
snake_case : Optional[int] = xa
snake_case : List[Any] = ya
invpath.append([x, y] )
snake_case : int = []
for i in range(len(__lowerCamelCase ) ):
path.append(invpath[len(__lowerCamelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__lowerCamelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__lowerCamelCase = [0, 0]
# all coordinates are given in format [y,x]
__lowerCamelCase = [len(grid) - 1, len(grid[0]) - 1]
__lowerCamelCase = 1
# the cost map which pushes the path closer to the goal
__lowerCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__lowerCamelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__lowerCamelCase = 99
__lowerCamelCase, __lowerCamelCase = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 356 |
def UpperCamelCase ( __lowerCamelCase : int ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError("only integers accepted as input" )
else:
snake_case : Dict = str(abs(__lowerCamelCase ) )
snake_case : Dict = [list(__lowerCamelCase ) for char in range(len(__lowerCamelCase ) )]
for index in range(len(__lowerCamelCase ) ):
num_transpositions[index].pop(__lowerCamelCase )
return max(
int("".join(list(__lowerCamelCase ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 10 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase ( A_ ,unittest.TestCase ):
A__ : Any = ShapEImgaImgPipeline
A__ : List[str] = ["image"]
A__ : str = ["image"]
A__ : Any = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
A__ : Tuple = False
@property
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
return 32
@property
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
return 32
@property
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[str]:
'''simple docstring'''
return 8
@property
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
snake_case : Optional[int] = CLIPVisionModel(snake_case__ )
return model
@property
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=snake_case__ , do_normalize=snake_case__ , do_resize=snake_case__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Optional[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
snake_case : Tuple = PriorTransformer(**snake_case__ )
return model
@property
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Union[str, Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
snake_case : Optional[int] = ShapERenderer(**snake_case__ )
return model
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : int = self.dummy_prior
snake_case : str = self.dummy_image_encoder
snake_case : str = self.dummy_image_processor
snake_case : Tuple = self.dummy_renderer
snake_case : Any = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=snake_case__ , clip_sample=snake_case__ , clip_sample_range=1.0 , )
snake_case : Dict = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] , snake_case__ : Any=0 ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith("mps" ):
snake_case : List[Any] = torch.manual_seed(snake_case__ )
else:
snake_case : List[str] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
snake_case : List[str] = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE (self : int ) -> int:
'''simple docstring'''
snake_case : Any = "cpu"
snake_case : Optional[int] = self.get_dummy_components()
snake_case : Dict = self.pipeline_class(**snake_case__ )
snake_case : Any = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
snake_case : Any = pipe(**self.get_dummy_inputs(snake_case__ ) )
snake_case : Optional[int] = output.images[0]
snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
snake_case : int = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Tuple = torch_device == "cpu"
snake_case : List[str] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=snake_case__ , relax_max_difference=snake_case__ , )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : Any = self.get_dummy_components()
snake_case : Union[str, Any] = self.pipeline_class(**snake_case__ )
snake_case : str = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
snake_case : int = 1
snake_case : Dict = 2
snake_case : Union[str, Any] = self.get_dummy_inputs(snake_case__ )
for key in inputs.keys():
if key in self.batch_params:
snake_case : str = batch_size * [inputs[key]]
snake_case : Optional[int] = pipe(**snake_case__ , num_images_per_prompt=snake_case__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int:
'''simple docstring'''
snake_case : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
snake_case : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy" )
snake_case : List[Any] = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
snake_case : Dict = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
snake_case : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(0 )
snake_case : int = pipe(
snake_case__ , generator=snake_case__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 357 |
import requests
from bsa import BeautifulSoup
def UpperCamelCase ( __lowerCamelCase : str = "AAPL" ):
snake_case : List[Any] = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
snake_case : Tuple = BeautifulSoup(requests.get(__lowerCamelCase ).text , "html.parser" )
snake_case : Dict = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 10 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""}
__lowerCamelCase = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Dict = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
snake_case : Any = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
snake_case : List[Any] = token.rstrip("\n" )
snake_case : int = index
return vocab
class UpperCAmelCase ( A_ ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = ["input_ids", "attention_mask"]
def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None:
'''simple docstring'''
snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case : Dict = f"""[unused{i}]"""
snake_case : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case : Dict = 12
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__(self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.__dict__.copy()
snake_case : Tuple = None
return state
def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
snake_case : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 358 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
__lowerCamelCase = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = f"""facebook/wmt19-{pair}"""
snake_case : Optional[Any] = self.get_tokenizer(snake_case__ )
snake_case : Dict = self.get_model(snake_case__ )
snake_case : List[Any] = bleu_data[pair]["src"]
snake_case : int = bleu_data[pair]["tgt"]
snake_case : Union[str, Any] = tokenizer(snake_case__ , return_tensors="pt" , truncation=snake_case__ , padding="longest" ).to(snake_case__ )
snake_case : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
snake_case : Optional[int] = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
snake_case : Optional[int] = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["bleu"] , snake_case__ )
| 10 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase = {
"""gpt-neox-20b""": 20_48,
}
class UpperCAmelCase ( A_ ):
A__ : int = VOCAB_FILES_NAMES
A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : List[Any] = ["input_ids", "attention_mask"]
def __init__(self : Union[str, Any] , snake_case__ : Dict=None , snake_case__ : int=None , snake_case__ : str=None , snake_case__ : Any="<|endoftext|>" , snake_case__ : Dict="<|endoftext|>" , snake_case__ : Optional[Any]="<|endoftext|>" , snake_case__ : Any=False , **snake_case__ : Union[str, Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , )
snake_case : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
snake_case : List[str] = getattr(snake_case__ , pre_tok_state.pop("type" ) )
snake_case : int = add_prefix_space
snake_case : int = pre_tok_class(**snake_case__ )
snake_case : Dict = add_prefix_space
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
snake_case : Tuple = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : "Conversation" ) -> List[int]:
'''simple docstring'''
snake_case : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ , add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
snake_case : List[str] = input_ids[-self.model_max_length :]
return input_ids
| 359 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCamelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
snake_case : Union[str, Any] = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ , repo_id="test-config" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : Any = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
snake_case : Optional[int] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case__ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
CustomConfig.register_for_auto_class()
snake_case : Union[str, Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
snake_case : int = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
snake_case : Tuple = c.n_embd + 1 # int
snake_case : str = c.resid_pdrop + 1.0 # float
snake_case : Optional[Any] = not c.scale_attn_weights # bool
snake_case : Optional[int] = c.summary_type + "foo" # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(snake_case__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case__ , c.summary_type , "mismatch for key: summary_type" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = PretrainedConfig()
snake_case : List[str] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
snake_case__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
snake_case : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(snake_case__ , snake_case__ )]
if len(snake_case__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f""" {', '.join(snake_case__ )}.""" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = mock.Mock()
snake_case : Optional[int] = 5_00
snake_case : Any = {}
snake_case : str = HTTPError
snake_case : Tuple = {}
# Download this model to make sure it's in the cache.
snake_case : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
snake_case : List[str] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
snake_case : Optional[Any] = AutoConfig.from_pretrained("bert-base-cased" )
snake_case : int = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case__ )
snake_case : str = 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
snake_case : str = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
snake_case : List[str] = ["config.42.0.0.json"]
snake_case : Optional[int] = 7_68
configuration.save_pretrained(snake_case__ )
shutil.move(os.path.join(snake_case__ , "config.4.0.0.json" ) , os.path.join(snake_case__ , "config.42.0.0.json" ) )
snake_case : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
snake_case : Optional[int] = "v4.0.0"
snake_case , snake_case : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case__ , return_unused_kwargs=snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(snake_case__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
snake_case : int = "v3.0.0"
snake_case : int = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 10 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 360 |
import os
import string
import sys
__lowerCamelCase = 1 << 8
__lowerCamelCase = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
__lowerCamelCase = KEYMAP["""up"""]
__lowerCamelCase = KEYMAP["""left"""]
if sys.platform == "win32":
__lowerCamelCase = []
__lowerCamelCase = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
__lowerCamelCase = ord(str(i))
def UpperCamelCase ( ):
if os.name == "nt":
import msvcrt
snake_case : str = "mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__lowerCamelCase ) == 0:
# Read the keystroke
snake_case : Optional[int] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
snake_case : Any = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
snake_case : int = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(__lowerCamelCase )
if ord(__lowerCamelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
snake_case : List[str] = chr(KEYMAP["esc"] )
except KeyError:
snake_case : Optional[Any] = cha[1]
else:
snake_case : Any = ch.decode(__lowerCamelCase )
else:
snake_case : Optional[Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
snake_case : Union[str, Any] = sys.stdin.fileno()
snake_case : Optional[Any] = termios.tcgetattr(__lowerCamelCase )
try:
tty.setraw(__lowerCamelCase )
snake_case : Union[str, Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(__lowerCamelCase , termios.TCSADRAIN , __lowerCamelCase )
return ch
def UpperCamelCase ( ):
snake_case : int = get_raw_chars()
if ord(__lowerCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__lowerCamelCase ) == KEYMAP["esc"]:
snake_case : Dict = get_raw_chars()
if ord(__lowerCamelCase ) == KEYMAP["mod_int"]:
snake_case : Any = get_raw_chars()
if ord(__lowerCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__lowerCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__lowerCamelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 10 | 0 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def UpperCamelCase ( __lowerCamelCase : str = "https://www.worldometers.info/coronavirus" ):
snake_case : Dict = BeautifulSoup(requests.get(__lowerCamelCase ).text , "html.parser" )
snake_case : Union[str, Any] = soup.findAll("h1" )
snake_case : int = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowerCamelCase , __lowerCamelCase )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(F'{key}\n{value}\n')
| 361 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
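# Pattern note (a sketch, not part of the original module): only the *names* are registered
# at import time; the `_LazyModule` proxy installed in sys.modules performs the real,
# torch/vision-gated import on first attribute access, e.g.
#   from transformers.models.dpt import DPTModel   # torch is only imported here
# The eager imports in the TYPE_CHECKING branch exist purely for static type checkers.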
| 10 | 0 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCAmelCase ( A_ ):
def __init__(self : Tuple , snake_case__ : Any , snake_case__ : str=None , snake_case__ : Tuple=True , snake_case__ : Union[str, Any]=None , **snake_case__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = parent
snake_case : Union[str, Any] = config_class
snake_case : Dict = has_text_modality
snake_case : int = kwargs
snake_case : Any = common_properties
def _SCREAMING_SNAKE_CASE (self : int ) -> Dict:
'''simple docstring'''
snake_case : str = self.config_class(**self.inputs_dict )
snake_case : int = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(snake_case__ , snake_case__ ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(snake_case__ ):
try:
setattr(snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=f"""`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(snake_case__ ):
try:
snake_case : Dict = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=f"""`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.config_class(**self.inputs_dict )
snake_case : Optional[Any] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : str = os.path.join(snake_case__ , "config.json" )
config_first.to_json_file(snake_case__ )
snake_case : Any = self.config_class.from_json_file(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : int = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(snake_case__ )
snake_case : Tuple = self.config_class.from_pretrained(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Tuple = self.config_class(**self.inputs_dict )
snake_case : List[str] = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Tuple = os.path.join(snake_case__ , snake_case__ )
config_first.save_pretrained(snake_case__ )
snake_case : Any = self.config_class.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : int = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
snake_case : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]:
'''simple docstring'''
if self.config_class.is_composition:
return
snake_case : Dict = self.config_class()
self.parent.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = copy.deepcopy(snake_case__ )
snake_case : List[str] = self.config_class(**snake_case__ )
snake_case : Dict = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(snake_case__ , snake_case__ ) != value:
wrong_values.append((key, getattr(snake_case__ , snake_case__ ), value) )
if len(snake_case__ ) > 0:
snake_case : Tuple = "\n".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
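# Usage sketch (hedged): upstream this helper is `ConfigTester` and the aggregate runner
# above is `run_common_tests()`; a concrete model test typically wires it up like this
# (BertConfig and hidden_size=37 are illustrative):
#
#   class BertConfigTest(unittest.TestCase):
#       def setUp(self):
#           self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#       def test_config(self):
#           self.config_tester.run_common_tests()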
| 362 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , *snake_case__ : List[str] , **snake_case__ : Dict ) -> None:
'''simple docstring'''
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 10 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class UpperCAmelCase ( A_ ):
def __init__(self : int , **snake_case__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , "vision" )
self.check_model_type(snake_case__ )
def __call__(self : str , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
if "text_queries" in kwargs:
snake_case : Tuple = kwargs.pop("text_queries" )
if isinstance(snake_case__ , (str, Image.Image) ):
snake_case : Dict = {"image": image, "candidate_labels": candidate_labels}
else:
snake_case : Tuple = image
snake_case : Dict = super().__call__(snake_case__ , **snake_case__ )
return results
def _SCREAMING_SNAKE_CASE (self : Dict , **snake_case__ : Tuple ) -> Tuple:
'''simple docstring'''
snake_case : Optional[int] = {}
if "threshold" in kwargs:
snake_case : Optional[Any] = kwargs["threshold"]
if "top_k" in kwargs:
snake_case : int = kwargs["top_k"]
return {}, {}, postprocess_params
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : int = load_image(inputs["image"] )
snake_case : int = inputs["candidate_labels"]
if isinstance(snake_case__ , snake_case__ ):
snake_case : int = candidate_labels.split("," )
snake_case : List[Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
snake_case : Any = self.tokenizer(snake_case__ , return_tensors=self.framework )
snake_case : str = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Any ) -> int:
'''simple docstring'''
snake_case : Any = model_inputs.pop("target_size" )
snake_case : str = model_inputs.pop("candidate_label" )
snake_case : Dict = model_inputs.pop("is_last" )
snake_case : Optional[Any] = self.model(**snake_case__ )
snake_case : Tuple = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str]=0.1 , snake_case__ : Any=None ) -> str:
'''simple docstring'''
snake_case : Dict = []
for model_output in model_outputs:
snake_case : List[str] = model_output["candidate_label"]
snake_case : Union[str, Any] = BaseModelOutput(snake_case__ )
snake_case : str = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
snake_case : str = outputs["scores"][index].item()
snake_case : Optional[int] = self._get_bounding_box(outputs["boxes"][index][0] )
snake_case : Tuple = {"score": score, "label": label, "box": box}
results.append(snake_case__ )
        snake_case : int = sorted(snake_case__ , key=lambda x : x["score"] , reverse=snake_case__ )
if top_k:
snake_case : Optional[Any] = results[:top_k]
return results
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
snake_case : int = box.int().tolist()
snake_case : Dict = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
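# Usage sketch (hedged; the checkpoint and numbers are illustrative, not taken from this
# file). The pipeline runs one chunked forward pass per candidate label, then
# post-processes the detections and sorts them by score:
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("street.jpg", candidate_labels=["a cat", "a bicycle"], threshold=0.1)
#   # -> [{"score": 0.95, "label": "a bicycle", "box": {"xmin": ..., "ymin": ..., ...}}, ...]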
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {"""vocab_file""": """spiece.model"""}
__lowerCamelCase = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
__lowerCamelCase = {
"""AI-Sweden/gpt-sw3-126m""": 20_48,
"""AI-Sweden/gpt-sw3-350m""": 20_48,
"""AI-Sweden/gpt-sw3-1.6b""": 20_48,
"""AI-Sweden/gpt-sw3-6.7b""": 20_48,
"""AI-Sweden/gpt-sw3-20b""": 20_48,
}
class UpperCAmelCase ( A_ ):
A__ : Any = VOCAB_FILES_NAMES
A__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : Optional[int] , snake_case__ : Dict , snake_case__ : int=False , snake_case__ : Union[str, Any]=False , snake_case__ : List[Any]=False , snake_case__ : int=None , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Union[str, Any] , ) -> None:
'''simple docstring'''
snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case : Optional[Any] = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
snake_case : List[Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
snake_case : str = "<|endoftext|>" if eos_token is None else eos_token
snake_case : List[Any] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
snake_case : List[Any] = unk_token if pad_token is None else pad_token
snake_case : Optional[Any] = eos_token if bos_token is None else bos_token
else:
snake_case : List[Any] = "<pad>" if pad_token is None else pad_token
snake_case : Dict = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : int = do_lower_case
snake_case : Dict = remove_space
snake_case : Optional[Any] = keep_accents
snake_case : Optional[Any] = vocab_file
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# Used for whitespace normalization in input texts
        # fmt: off
snake_case : Tuple = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
snake_case : List[Any] = re.compile(
f"""[{''.join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]""" )
def __getstate__(self : int ) -> Union[str, Any]:
'''simple docstring'''
snake_case : int = self.__dict__.copy()
snake_case : Union[str, Any] = None
return state
def __setstate__(self : Dict , snake_case__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Union[str, Any] = {}
snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int:
'''simple docstring'''
return len(self.sp_model )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : str ) -> str:
'''simple docstring'''
snake_case : Optional[int] = self.non_printing_characters_re.sub("" , snake_case__ )
# Normalize whitespaces
snake_case : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
snake_case : Optional[Any] = unicodedata.normalize("NFC" , snake_case__ )
return text
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , **snake_case__ : List[Any] ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = self.preprocess_text(snake_case__ )
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : str ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : int ) -> str:
'''simple docstring'''
return self.sp_model.IdToPiece(snake_case__ )
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : str ) -> str:
'''simple docstring'''
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[str] ) -> str:
'''simple docstring'''
snake_case : Optional[int] = []
snake_case : Optional[Any] = ""
snake_case : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
snake_case : Optional[Any] = True
snake_case : int = []
else:
current_sub_tokens.append(snake_case__ )
snake_case : List[Any] = False
out_string += self.sp_model.decode(snake_case__ )
return out_string
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Dict[str, int]:
'''simple docstring'''
snake_case : Any = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : str = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Dict = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Union[str, List[str]] , snake_case__ : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
'''simple docstring'''
if isinstance(snake_case__ , snake_case__ ):
snake_case : Dict = self.preprocess_text(snake_case__ )
snake_case : Optional[Any] = self.sp_model.encode(snake_case__ )
else:
snake_case : Optional[int] = [self.preprocess_text(snake_case__ ) for t in text]
snake_case : List[str] = self.sp_model.encode(snake_case__ )
if return_tensors is True or return_tensors == "pt":
snake_case : Dict = torch.tensor(snake_case__ )
return token_ids
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Union[int, List[int]] ) -> str:
'''simple docstring'''
return self.sp_model.decode(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : "Conversation" ) -> List[int]:
'''simple docstring'''
snake_case : Union[str, Any] = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
snake_case : List[Any] = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(snake_case__ ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=snake_case__ )
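# Worked example of the prompt assembled by the conversation method above, with the default
# special tokens (eos="<|endoftext|>", bos="<s>"): for the turns
# ["Hej!" (user), "Hej hej!" (bot)] the text handed to encode() is
#   "<|endoftext|><s>User: Hej!<s>Bot: Hej hej!<s>Bot:"
# i.e. an eos+bos prefix, turns joined by bos, and a trailing "Bot:" cue for generation.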
| 364 |
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Union[str, Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
snake_case : Tuple = ""
snake_case : Optional[int] = ""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__lowerCamelCase ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic
    # substring
snake_case , snake_case : Tuple = 0, 0
# length[i] shows the length of palindromic substring with center i
snake_case : Any = [1 for i in range(len(__lowerCamelCase ) )]
# for each character in new_string find corresponding palindromic string
snake_case : int = 0
for j in range(len(__lowerCamelCase ) ):
snake_case : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__lowerCamelCase )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
snake_case : str = 2 * k - 1
        # does this palindrome end after the previously explored right end (that is r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
snake_case : List[str] = j - k + 1 # noqa: E741
snake_case : Dict = j + k - 1
# update max_length and start position
if max_length < length[j]:
snake_case : Optional[Any] = length[j]
snake_case : int = j
# create that string
snake_case : Any = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
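# Worked example (Manacher's algorithm): for input "abacab" the interleaved working string
# is "a|b|a|c|a|b" and the longest palindromic substring recovered is "bacab". The scan is
# O(n): the explored right frontier r only moves forward, and each new center j starts
# from the mirrored radius min(length[l + r - j] // 2, r - j + 1) instead of from 1.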
| 10 | 0 |
import requests
from bsa import BeautifulSoup
def UpperCamelCase ( __lowerCamelCase : str = "AAPL" ):
snake_case : List[Any] = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
snake_case : Tuple = BeautifulSoup(requests.get(__lowerCamelCase ).text , "html.parser" )
snake_case : Dict = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 365 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__lowerCamelCase = Mapping[str, np.ndarray]
__lowerCamelCase = Mapping[str, Any] # Is a nested dict.
__lowerCamelCase = 0.01
@dataclasses.dataclass(frozen=A_ )
class UpperCAmelCase :
A__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
A__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
A__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
A__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
A__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
A__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
A__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
A__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
A__ : Optional[Sequence[int]] = None
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Dict = r"(\[[A-Z]+\]\n)"
snake_case : List[str] = [tag.strip() for tag in re.split(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) > 0]
snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
snake_case : List[str] = ["N", "CA", "C"]
snake_case : str = None
snake_case : str = None
snake_case : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
snake_case : Optional[Any] = "X" # FIXME: strings are immutable
snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(__lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowerCamelCase , g[1][axis].split() ) ) )
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
snake_case : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
snake_case : List[str] = np.zeros(
(
len(__lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCamelCase , atom_mask=__lowerCamelCase , aatype=__lowerCamelCase , residue_index=np.arange(len(__lowerCamelCase ) ) , b_factors=__lowerCamelCase , )
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : int = 0 ):
snake_case : List[str] = []
snake_case : str = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
snake_case : Union[str, Any] = prot.parents
snake_case : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
snake_case : Tuple = [p for i, p in zip(__lowerCamelCase , __lowerCamelCase ) if i == chain_id]
if parents is None or len(__lowerCamelCase ) == 0:
snake_case : int = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(__lowerCamelCase )}""" )
return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowerCamelCase ) , [] )
parent_dict[str(__lowerCamelCase )].append(__lowerCamelCase )
snake_case : List[str] = max([int(__lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
snake_case : Optional[Any] = parent_dict.get(str(__lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(__lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(__lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCamelCase ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_atoa(__lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.intaa )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
pdb_lines.extend(__lowerCamelCase )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(__lowerCamelCase ):
snake_case : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(__lowerCamelCase ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , __lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( __lowerCamelCase : FeatureDict , __lowerCamelCase : ModelOutput , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Sequence[str]] = None , __lowerCamelCase : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowerCamelCase , remark=__lowerCamelCase , parents=__lowerCamelCase , parents_chain_index=__lowerCamelCase , )
| 10 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = "ZinengTang/tvlt-base"
snake_case : str = tempfile.mkdtemp()
def _SCREAMING_SNAKE_CASE (self : str , **snake_case__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : Tuple ) -> Dict:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
snake_case : Any = self.get_image_processor()
snake_case : Optional[int] = self.get_feature_extractor()
snake_case : Optional[Any] = TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
processor.save_pretrained(self.tmpdirname )
snake_case : Any = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , snake_case__ )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = self.get_image_processor()
snake_case : Optional[Any] = self.get_feature_extractor()
snake_case : int = TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
snake_case : Tuple = np.ones([1_20_00] )
snake_case : Tuple = feature_extractor(snake_case__ , return_tensors="np" )
snake_case : Union[str, Any] = processor(audio=snake_case__ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = self.get_image_processor()
snake_case : Tuple = self.get_feature_extractor()
snake_case : int = TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
snake_case : Tuple = np.ones([3, 2_24, 2_24] )
snake_case : Union[str, Any] = image_processor(snake_case__ , return_tensors="np" )
snake_case : Dict = processor(images=snake_case__ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : List[str] = self.get_image_processor()
snake_case : Any = self.get_feature_extractor()
snake_case : Optional[Any] = TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
snake_case : Dict = np.ones([1_20_00] )
snake_case : str = np.ones([3, 2_24, 2_24] )
snake_case : Optional[Any] = processor(audio=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = self.get_image_processor()
snake_case : int = self.get_feature_extractor()
snake_case : Tuple = TvltProcessor(image_processor=snake_case__ , feature_extractor=snake_case__ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 366 |
from __future__ import annotations
__lowerCamelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCAmelCase :
def __init__(self : Tuple , snake_case__ : dict[str, list[str]] , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : str = graph
# mapping node to its parent in resulting breadth first tree
snake_case : dict[str, str | None] = {}
snake_case : Union[str, Any] = source_vertex
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> None:
'''simple docstring'''
snake_case : Any = {self.source_vertex}
snake_case : str = None
snake_case : List[str] = [self.source_vertex] # first in first out queue
while queue:
snake_case : List[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case__ )
snake_case : Any = vertex
queue.append(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str ) -> str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
snake_case : str = self.parent.get(snake_case__ )
if target_vertex_parent is None:
snake_case : Optional[Any] = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(snake_case__ )
return self.shortest_path(snake_case__ ) + f"""->{target_vertex}"""
if __name__ == "__main__":
__lowerCamelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 10 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCAmelCase :
def __init__(self : Dict , snake_case__ : Any , snake_case__ : Tuple=99 , snake_case__ : Tuple=13 , snake_case__ : int=16 , snake_case__ : Tuple=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=2 , snake_case__ : List[Any]=32 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : int=30 , snake_case__ : int=0 , snake_case__ : Tuple=1 , snake_case__ : Optional[Any]=2 , snake_case__ : int=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : Any = decoder_seq_length
# For common tests
snake_case : Any = self.decoder_seq_length
snake_case : Optional[int] = is_training
snake_case : List[str] = use_attention_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : Any = d_model
snake_case : Dict = d_model
snake_case : List[str] = decoder_layers
snake_case : Union[str, Any] = decoder_layers
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_attention_heads
snake_case : Dict = decoder_attention_heads
snake_case : Optional[int] = eos_token_id
snake_case : Dict = bos_token_id
snake_case : List[str] = pad_token_id
snake_case : int = decoder_start_token_id
snake_case : List[Any] = use_cache
snake_case : List[str] = max_position_embeddings
snake_case : Dict = None
snake_case : Union[str, Any] = decoder_seq_length
snake_case : Union[str, Any] = 2
snake_case : Union[str, Any] = 1
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_attention_mask:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : List[Any] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval()
snake_case : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case : List[str] = model(snake_case__ , use_cache=snake_case__ )
snake_case : Any = model(snake_case__ )
snake_case : Any = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
snake_case : List[Any] = outputs["past_key_values"]
        # create hypothetical next token and extend next_input_ids
snake_case : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new tokens to input_ids
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : str = model(snake_case__ )["last_hidden_state"]
snake_case : str = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"]
# select random slice
snake_case : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : str = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.prepare_config_and_inputs()
snake_case : Dict = config_and_inputs
snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ )
snake_case : int = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
| 367 |
from __future__ import annotations
def UpperCamelCase ( __lowerCamelCase : list[int] ):
snake_case : Optional[int] = len(__lowerCamelCase ) // 2
# choose the middle 3 elements
snake_case : str = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
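# Worked example (divide and conquer on a unimodal list): for [1, 2, 3, 4, 5, 4, 3, 2, 1]
# the middle triple is [4, 5, 4]; 5 beats both neighbours, so 5 is returned immediately.
# Otherwise the search recurses into the rising half, halving the list each call: O(log n).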
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : List[str] ):
    # if the collection is empty, return an empty list
if collection == []:
return []
# get some information about the collection
snake_case : Union[str, Any] = len(__lowerCamelCase )
snake_case : Tuple = max(__lowerCamelCase )
snake_case : Optional[Any] = min(__lowerCamelCase )
# create the counting array
snake_case : Union[str, Any] = coll_max + 1 - coll_min
snake_case : Optional[int] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i the collection contains
for i in range(1 , __lowerCamelCase ):
snake_case : Optional[Any] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
snake_case : Any = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , __lowerCamelCase ) ):
snake_case : Union[str, Any] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def UpperCamelCase ( __lowerCamelCase : Optional[Any] ):
return "".join([chr(__lowerCamelCase ) for i in counting_sort([ord(__lowerCamelCase ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
__lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
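# Worked micro-example of the prefix-sum step: for [4, 1, 3, 1] (min=1, max=4) the raw
# counts are [2, 0, 1, 1]; after the running sum they become [2, 2, 3, 4], i.e.
# counting_arr[i] = number of elements <= i + coll_min, which is exactly the (1-based)
# final position used when placing elements from the end, keeping the sort stable.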
| 368 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = """."""
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
__lowerCamelCase = line.strip()
__lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = """\n""".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 10 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class UpperCAmelCase ( A_ ):
A__ : Union[str, Any] = "blip_2_vision_model"
def __init__(self : Any , snake_case__ : Optional[Any]=14_08 , snake_case__ : Optional[Any]=61_44 , snake_case__ : Any=39 , snake_case__ : str=16 , snake_case__ : Optional[Any]=2_24 , snake_case__ : str=14 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.00001 , snake_case__ : Optional[Any]=0.0 , snake_case__ : Union[str, Any]=1e-10 , snake_case__ : Dict=True , **snake_case__ : Optional[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : int = hidden_size
snake_case : Optional[Any] = intermediate_size
snake_case : List[Any] = num_hidden_layers
snake_case : Dict = num_attention_heads
snake_case : Union[str, Any] = patch_size
snake_case : Dict = image_size
snake_case : Tuple = initializer_range
snake_case : List[str] = attention_dropout
snake_case : str = layer_norm_eps
snake_case : Union[str, Any] = hidden_act
snake_case : Union[str, Any] = qkv_bias
@classmethod
def _SCREAMING_SNAKE_CASE (cls : str , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Optional[int] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
snake_case : Optional[int] = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
snake_case : Dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class UpperCAmelCase ( A_ ):
A__ : Optional[int] = "blip_2_qformer"
def __init__(self : Tuple , snake_case__ : List[Any]=3_05_22 , snake_case__ : Any=7_68 , snake_case__ : Any=12 , snake_case__ : List[str]=12 , snake_case__ : List[Any]=30_72 , snake_case__ : List[Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Optional[int]=5_12 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : Optional[int]=1e-12 , snake_case__ : List[str]=0 , snake_case__ : Tuple="absolute" , snake_case__ : Dict=2 , snake_case__ : Optional[Any]=14_08 , **snake_case__ : List[str] , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
snake_case : Union[str, Any] = vocab_size
snake_case : Any = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : List[Any] = hidden_act
snake_case : Optional[int] = intermediate_size
snake_case : int = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : Union[str, Any] = max_position_embeddings
snake_case : List[str] = initializer_range
snake_case : Union[str, Any] = layer_norm_eps
snake_case : Union[str, Any] = position_embedding_type
snake_case : Union[str, Any] = cross_attention_frequency
snake_case : str = encoder_hidden_size
@classmethod
def _SCREAMING_SNAKE_CASE (cls : List[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Any ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
snake_case : List[str] = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
snake_case : str = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class UpperCAmelCase ( A_ ):
A__ : int = "blip-2"
A__ : Optional[int] = True
def __init__(self : Dict , snake_case__ : str=None , snake_case__ : Union[str, Any]=None , snake_case__ : Any=None , snake_case__ : Any=32 , **snake_case__ : Dict ) -> Dict:
'''simple docstring'''
super().__init__(**snake_case__ )
if vision_config is None:
snake_case : Any = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
snake_case : Any = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
snake_case : Tuple = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
snake_case : int = BlipaVisionConfig(**snake_case__ )
snake_case : Dict = BlipaQFormerConfig(**snake_case__ )
snake_case : Any = text_config["model_type"] if "model_type" in text_config else "opt"
snake_case : int = CONFIG_MAPPING[text_model_type](**snake_case__ )
snake_case : Optional[Any] = self.text_config.tie_word_embeddings
snake_case : str = self.text_config.is_encoder_decoder
snake_case : List[Any] = num_query_tokens
snake_case : str = self.vision_config.hidden_size
snake_case : Optional[int] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case : Optional[Any] = 1.0
snake_case : Optional[int] = 0.02
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Union[str, Any] , snake_case__ : BlipaVisionConfig , snake_case__ : BlipaQFormerConfig , snake_case__ : PretrainedConfig , **snake_case__ : List[Any] , ) -> Dict:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **snake_case__ , )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = copy.deepcopy(self.__dict__ )
snake_case : List[str] = self.vision_config.to_dict()
snake_case : int = self.qformer_config.to_dict()
snake_case : Any = self.text_config.to_dict()
snake_case : str = self.__class__.model_type
return output
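# Behaviour sketch (hedged): constructing the composite config with no sub-configs takes
# all three `is None` branches above, logging a notice each time and defaulting the text
# backbone to OPT via CONFIG_MAPPING["opt"]; the to_dict override then re-nests the three
# sub-configs so a JSON round trip preserves the full structure, e.g.
#   cfg.to_dict()["vision_config"]["hidden_size"]   # -> 1408 with the defaults above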
| 369 |
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Tuple ):
snake_case : Optional[Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()]
snake_case : Union[str, Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()][: len(__lowerCamelCase )]
snake_case : List[Any] = calculate_rouge(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
if save_path is not None:
save_json(__lowerCamelCase , __lowerCamelCase , indent=__lowerCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : int = 10**9 ):
snake_case : str = 1
snake_case : Union[str, Any] = 2
snake_case : Dict = 0
snake_case : Union[str, Any] = 0
snake_case : int = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
snake_case : Union[str, Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F'{solution() = }')
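# Trace of the first iterations (this matches the Pell-type recurrence for Project Euler
# problem 94, "almost equilateral" Heronian triangles), starting from prev_value=1, value=2:
#   i=0: prev=5,  value=7,  perimeter=2*7+2  = 16   -> triangle (5, 5, 6)
#   i=1: prev=19, value=26, perimeter=2*26-2 = 50   -> triangle (17, 17, 16)
# and perimeters_sum accumulates every perimeter not exceeding max_perimeter.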
| 370 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    # NOTE: dict_path is overridden here; kept as-is from the original script.
    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
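# Example invocation (hypothetical paths):
#   python convert_unispeech_sat_checkpoint.py --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf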
| 10 | 0 |
class CircularQueue:
    """Circular FIFO buffer with a fixed capacity, backed by a plain list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        # Returns False when empty (kept from the original behaviour).
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
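# Minimal usage sketch for the class above (runs as written):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")  # enqueue returns self, so calls chain
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert len(queue) == 2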
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (pieces) back to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
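# Usage sketch (hypothetical; downloads the SentencePiece model on first use):
#   tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
#   tok("Hello world")["input_ids"]  # ends with the [SEP] id appended by build_inputs_with_special_tokens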
| 10 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Check that an error is raised when the user tries to load a tokenizer saved in an old format.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 10 | 0 |
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
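    # Expected output of the two calls above (the second follows directly from 4x = 2):
    #   [-1.0, 0.0, 1.0, 2.0, 3.0]
    #   [0.5]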
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        """Pad an image symmetrically so its height and width become multiples of `size`."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
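# Usage sketch (assumes it runs inside transformers so the relative imports resolve):
#   processor = Swin2SRImageProcessor(pad_size=8)
#   out = processor(images=np.zeros((3, 20, 20), dtype=np.uint8), return_tensors="np")
#   out["pixel_values"].shape  # height/width padded up to multiples of 8 -> (1, 3, 24, 24)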
| 10 | 0 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
value.print_tree(height + 1 )
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
| 352 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
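# Parameter meanings for gabor_filter_kernel (standard Gabor filter conventions):
# sigma is the std of the Gaussian envelope, theta the orientation in degrees,
# lambd the sinusoid wavelength, gamma the spatial aspect ratio, psi the phase offset.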
| 10 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def UpperCamelCase ( ):
snake_case : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 353 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_attention_mask:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : List[Any] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval()
snake_case : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case : List[str] = model(snake_case__ , use_cache=snake_case__ )
snake_case : Any = model(snake_case__ )
snake_case : Any = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
snake_case : List[Any] = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
snake_case : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new token to the existing input_ids
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : str = model(snake_case__ )["last_hidden_state"]
snake_case : str = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"]
# select random slice
snake_case : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : str = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ )
snake_case : int = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
| 10 | 0 |
def heaps(arr: list ) -> list:
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []

    def generate(k: int , arr: list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )

    generate(len(arr ) , arr )
    return res
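# Sanity check (added, illustrative): Heap's algorithm should yield the same
# multiset of n! tuples as itertools.permutations.
if __name__ == "__main__":
    from itertools import permutations

    assert sorted(heaps([1, 2, 3] ) ) == sorted(permutations([1, 2, 3] ) )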
if __name__ == "__main__":
__lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 354 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def create_inputs(input_types: List[str] ):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs
def output_types(outputs: List ):
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("text" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("image" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("audio" )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class UpperCAmelCase :
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
snake_case : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , snake_case__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : Dict = self.tool(*snake_case__ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : List[Any] = [outputs]
self.assertListEqual(output_types(snake_case__ ) , self.tool.outputs )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : Optional[Any] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
for output, output_type in zip(snake_case__ , self.tool.outputs ):
snake_case : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : str = []
for _input, input_type in zip(snake_case__ , self.tool.inputs ):
if isinstance(snake_case__ , snake_case__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Optional[int] = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : List[str] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
| 10 | 0 |
import datasets
__lowerCamelCase = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
__lowerCamelCase = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
__lowerCamelCase = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
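# Quick check (added for illustration): accuracy is just the mean of the
# element-wise equality mask over the two label arrays.
if __name__ == "__main__":
    import numpy as np

    assert simple_accuracy(np.array([0, 1, 1] ) , np.array([0, 1, 0] ) ) == 2 / 3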
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str , snake_case__ : str ) -> List[Any]:
'''simple docstring'''
return {"accuracy": simple_accuracy(snake_case__ , snake_case__ )}
| 355 |
def hamming_distance(string1: str , string2: str ) -> int:
    if len(string1 ) != len(string2 ):
        raise ValueError("String lengths must match!" )
    count = 0
    for char1, char2 in zip(string1 , string2 ):
        if char1 != char2:
            count += 1
    return count
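# Worked examples (added): the Hamming distance counts the positions at which
# two equal-length strings differ.
if __name__ == "__main__":
    assert hamming_distance("python" , "pithon" ) == 1
    assert hamming_distance("karolin" , "kathrin" ) == 3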
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DPTForDepthEstimation""",
        """DPTForSemanticSegmentation""",
        """DPTModel""",
        """DPTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356 |
def remove_digit(num: int ) -> int:
    if not isinstance(num , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 10 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class UpperCAmelCase ( A_ ):
A__ : Dict = "deberta-v2"
def __init__(self : Dict , snake_case__ : List[str]=12_81_00 , snake_case__ : Optional[int]=15_36 , snake_case__ : Union[str, Any]=24 , snake_case__ : Union[str, Any]=24 , snake_case__ : str=61_44 , snake_case__ : Optional[Any]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Dict=5_12 , snake_case__ : Dict=0 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[int]=1e-7 , snake_case__ : Tuple=False , snake_case__ : List[str]=-1 , snake_case__ : Dict=0 , snake_case__ : int=True , snake_case__ : Optional[int]=None , snake_case__ : Optional[Any]=0 , snake_case__ : int="gelu" , **snake_case__ : Optional[int] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : Optional[Any] = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : List[Any] = intermediate_size
snake_case : Any = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : List[str] = max_position_embeddings
snake_case : List[Any] = type_vocab_size
snake_case : List[str] = initializer_range
snake_case : List[str] = relative_attention
snake_case : str = max_relative_positions
snake_case : Union[str, Any] = pad_token_id
snake_case : int = position_biased_input
# Backwards compatibility
if type(snake_case__ ) == str:
snake_case : str = [x.strip() for x in pos_att_type.lower().split("|" )]
snake_case : int = pos_att_type
snake_case : Tuple = vocab_size
snake_case : Union[str, Any] = layer_norm_eps
snake_case : List[Any] = kwargs.get("pooler_hidden_size" , snake_case__ )
snake_case : Optional[Any] = pooler_dropout
snake_case : Optional[Any] = pooler_hidden_act
class UpperCAmelCase ( A_ ):
@property
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case : Dict = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
return 12
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional["TensorType"] = None , snake_case__ : int = 3 , snake_case__ : int = 40 , snake_case__ : int = 40 , snake_case__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case : Any = super().generate_dummy_inputs(preprocessor=snake_case__ , framework=snake_case__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
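# Usage note (added): in the upstream code base this configuration class is
# DebertaV2Config; a toy instance there would look like
#
#     cfg = DebertaV2Config(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
#     assert cfg.model_type == "deberta-v2"
#
# (illustrative values, not the pretrained defaults).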
| 357 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL" ) -> str:
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text
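# Defensive wrapper (added sketch, not part of the original script): Yahoo's
# markup changes frequently, so the <div>/<span> lookup above can raise
# AttributeError once the CSS class disappears from the page.
def safe_stock_price(symbol: str ) -> str:
    try:
        return stock_price(symbol )
    except AttributeError:
        return "n/a"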
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 10 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
__lowerCamelCase = logging.getLogger(__name__)
class UpperCAmelCase ( A_ ):
A__ : int = "masked_bert"
def __init__(self : str , snake_case__ : int=3_05_22 , snake_case__ : Any=7_68 , snake_case__ : str=12 , snake_case__ : str=12 , snake_case__ : Any=30_72 , snake_case__ : Optional[int]="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : List[Any]=5_12 , snake_case__ : Tuple=2 , snake_case__ : Dict=0.02 , snake_case__ : str=1e-12 , snake_case__ : Tuple=0 , snake_case__ : Optional[Any]="topK" , snake_case__ : Optional[Any]="constant" , snake_case__ : str=0.0 , **snake_case__ : Tuple , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
snake_case : Union[str, Any] = vocab_size
snake_case : Optional[int] = hidden_size
snake_case : Optional[int] = num_hidden_layers
snake_case : str = num_attention_heads
snake_case : List[str] = hidden_act
snake_case : Any = intermediate_size
snake_case : Tuple = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = max_position_embeddings
snake_case : Tuple = type_vocab_size
snake_case : int = initializer_range
snake_case : Optional[Any] = layer_norm_eps
snake_case : Dict = pruning_method
snake_case : List[str] = mask_init
snake_case : Union[str, Any] = mask_scale
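# Usage note (added): the pruning-specific fields are what distinguish this
# from a plain BERT config; upstream the class is called MaskedBertConfig, so
# an illustrative instance would be
#
#     cfg = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)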
| 358 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
__lowerCamelCase = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    def get_tokenizer(self , mname ):
        '''simple docstring'''
        return FSMTTokenizer.from_pretrained(mname )

    def get_model(self , mname ):
        '''simple docstring'''
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores(self , pair , min_bleu_score ):
        '''simple docstring'''
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["bleu"] , min_bleu_score )
| 10 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
@slow
    def test_small_integration_test(self ):
        '''simple docstring'''
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="np" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="np" ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item() )
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 359 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCamelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
snake_case : Union[str, Any] = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ , repo_id="test-config" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : Any = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
snake_case : Optional[int] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case__ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
CustomConfig.register_for_auto_class()
snake_case : Union[str, Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
snake_case : int = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
snake_case : Tuple = c.n_embd + 1 # int
snake_case : str = c.resid_pdrop + 1.0 # float
snake_case : Optional[Any] = not c.scale_attn_weights # bool
snake_case : Optional[int] = c.summary_type + "foo" # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(snake_case__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case__ , c.summary_type , "mismatch for key: summary_type" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = PretrainedConfig()
snake_case : List[str] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
snake_case__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
snake_case : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(snake_case__ , snake_case__ )]
if len(snake_case__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f""" {', '.join(snake_case__ )}.""" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = mock.Mock()
snake_case : Optional[int] = 5_00
snake_case : Any = {}
snake_case : str = HTTPError
snake_case : Tuple = {}
# Download this model to make sure it's in the cache.
snake_case : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
snake_case : List[str] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # This checks that we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
snake_case : Optional[Any] = AutoConfig.from_pretrained("bert-base-cased" )
snake_case : int = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case__ )
snake_case : str = 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
snake_case : str = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
snake_case : List[str] = ["config.42.0.0.json"]
snake_case : Optional[int] = 7_68
configuration.save_pretrained(snake_case__ )
shutil.move(os.path.join(snake_case__ , "config.4.0.0.json" ) , os.path.join(snake_case__ , "config.42.0.0.json" ) )
snake_case : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
snake_case : Optional[int] = "v4.0.0"
snake_case , snake_case : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case__ , return_unused_kwargs=snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(snake_case__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
snake_case : int = "v3.0.0"
snake_case : int = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 10 | 0 |
from math import ceil, sqrt


def solution(limit: int = 1000000 ) -> int:
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
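# Cross-check (added, illustrative): count the laminae directly for a small
# limit by enumerating (outer, hole) pairs of equal parity whose tile count
# outer**2 - hole**2 stays within the budget.
def brute_force(limit: int ) -> int:
    count = 0
    for outer in range(3 , limit ):
        if outer * outer - (outer - 2) ** 2 > limit:
            break
        for hole in range(outer - 2 , 0 , -2 ):
            if outer * outer - hole * hole > limit:
                break
            count += 1
    return count


if __name__ == "__main__":
    assert brute_force(1000 ) == solution(1000 )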
if __name__ == "__main__":
print(F'{solution() = }')
| 360 |
import os
import string
import sys

ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    """tab""": ord("""\t"""),
    """newline""": ord("""\r"""),
    """esc""": 27,
    """up""": 65 + ARROW_KEY_FLAG,
    """down""": 66 + ARROW_KEY_FLAG,
    """right""": 67 + ARROW_KEY_FLAG,
    """left""": 68 + ARROW_KEY_FLAG,
    """mod_int""": 91,
    """undefined""": sys.maxsize,
    """interrupt""": 3,
    """insert""": 50,
    """delete""": 51,
    """pg_up""": 53,
    """pg_down""": 54,
}

KEYMAP["""arrow_begin"""] = KEYMAP["""up"""]
KEYMAP["""arrow_end"""] = KEYMAP["""left"""]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
        B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
        B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
        B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
        B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
        B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
        B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
        B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["esc"] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch


def get_character():
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 10 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key(k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("encoder" ):
        k = k.replace(".attn" , ".self_attn" )
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "final_layer_norm" )
    elif k.startswith("decoder" ):
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "encoder_attn_layer_norm" )
        k = k.replace("norm3" , "final_layer_norm" )
    return k
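# Example mapping (added for illustration):
#   "embeddings.weight"                       -> "shared.weight"
#   "encoder.layers.0.attention.q_lin.weight" -> "encoder.layers.0.self_attn.q_proj.weight"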
def rename_layernorm_keys(sd ):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("layernorm_embedding" , "layer_norm" )
        assert new_k not in sd
        sd[new_k] = v
__lowerCamelCase = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location="cpu" )
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(mapping )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 361 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DPTForDepthEstimation""",
        """DPTForSemanticSegmentation""",
        """DPTModel""",
        """DPTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 0 |
def different_signs(num1: int , num2: int ) -> bool:
    """simple docstring"""
    return num1 ^ num2 < 0
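# Why the XOR trick works (added): the sign bit of num1 ^ num2 is set exactly
# when the two operands' sign bits differ, so comparing against zero detects
# mixed signs.
if __name__ == "__main__":
    assert different_signs(1 , -1 ) is True
    assert different_signs(1 , 1 ) is False
    assert different_signs(-7 , -5 ) is False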
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , *snake_case__ : List[str] , **snake_case__ : Dict ) -> None:
'''simple docstring'''
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 10 | 0 |
from PIL import Image


def mean_threshold(image: Image ) -> Image:
    width , height = image.size
    mean = 0
    pixels = image.load()
    for i in range(width ):
        for j in range(height ):
            mean += pixels[i, j]
    mean //= width * height
    for i in range(width ):
        for j in range(height ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
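# In-memory demo (added): threshold a tiny synthetic grayscale image without
# touching the filesystem; the mean of [10, 20, 200, 250] is 120.
if __name__ == "__main__":
    demo = Image.new("L" , (2, 2) )
    demo.putdata([10, 20, 200, 250] )
    assert list(mean_threshold(demo ).getdata() ) == [0, 0, 255, 255]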
if __name__ == "__main__":
__lowerCamelCase = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
def __init__(self : Any , snake_case__ : Dict , snake_case__ : Any=13 , snake_case__ : Optional[Any]=7 , snake_case__ : Any=True , snake_case__ : Optional[Any]=True , snake_case__ : Any=True , snake_case__ : Dict=True , snake_case__ : Union[str, Any]=99 , snake_case__ : Union[str, Any]=24 , snake_case__ : Any=2 , snake_case__ : List[str]=6 , snake_case__ : Any=37 , snake_case__ : Any="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Any=5_12 , snake_case__ : str=16 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : str=3 , snake_case__ : Tuple=None , snake_case__ : Optional[int]=10_00 , ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : int = batch_size
snake_case : Optional[int] = seq_length
snake_case : int = is_training
snake_case : Dict = use_input_mask
snake_case : Union[str, Any] = use_token_type_ids
snake_case : List[str] = use_labels
snake_case : Optional[Any] = vocab_size
snake_case : str = hidden_size
snake_case : Optional[Any] = num_hidden_layers
snake_case : Tuple = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : Any = hidden_act
snake_case : str = hidden_dropout_prob
snake_case : str = attention_probs_dropout_prob
snake_case : List[str] = max_position_embeddings
snake_case : Union[str, Any] = type_vocab_size
snake_case : Any = type_sequence_label_size
snake_case : Tuple = initializer_range
snake_case : str = num_labels
snake_case : int = scope
snake_case : Any = range_bbox
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case : List[str] = bbox[i, j, 3]
snake_case : List[str] = bbox[i, j, 1]
snake_case : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case : str = bbox[i, j, 2]
snake_case : Union[str, Any] = bbox[i, j, 0]
snake_case : Dict = t
snake_case : Any = None
if self.use_input_mask:
snake_case : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case : Optional[Any] = None
if self.use_token_type_ids:
snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : Any = None
snake_case : Any = None
if self.use_labels:
snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : str = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _SCREAMING_SNAKE_CASE (self : str ) -> Dict:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Tuple , ) -> List[Any]:
'''simple docstring'''
snake_case : List[Any] = LiltModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : Tuple = model(snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
snake_case : Union[str, Any] = model(snake_case__ , bbox=snake_case__ , token_type_ids=snake_case__ )
snake_case : Union[str, Any] = model(snake_case__ , bbox=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple , ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[str] = self.num_labels
snake_case : List[str] = LiltForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : Union[str, Any] = model(
snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Dict , ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = LiltForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : Any = model(
snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
A__ : Optional[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ : List[Any] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : Tuple = False
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int ) -> List[Any]:
'''simple docstring'''
return True
def _SCREAMING_SNAKE_CASE (self : Any ) -> str:
'''simple docstring'''
snake_case : Tuple = LiltModelTester(self )
snake_case : Dict = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int:
'''simple docstring'''
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : Optional[Any] = type
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : Any ) -> str:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Optional[int] = LiltModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
@slow
class UpperCAmelCase ( unittest.TestCase ):
    def test_inference_no_head(self ):
        '''simple docstring'''
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 7_68] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 364 |
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Union[str, Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
snake_case : Tuple = ""
snake_case : Optional[int] = ""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__lowerCamelCase ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
snake_case , snake_case : Tuple = 0, 0
# length[i] shows the length of palindromic substring with center i
snake_case : Any = [1 for i in range(len(__lowerCamelCase ) )]
# for each character in new_string find corresponding palindromic string
snake_case : int = 0
for j in range(len(__lowerCamelCase ) ):
snake_case : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__lowerCamelCase )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
snake_case : str = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
snake_case : List[str] = j - k + 1 # noqa: E741
snake_case : Dict = j + k - 1
# update max_length and start position
if max_length < length[j]:
snake_case : Optional[Any] = length[j]
snake_case : int = j
# create that string
snake_case : Any = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
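# Illustrative examples (not part of the original file): Manacher's algorithm
# finds the longest palindromic substring in O(n) by reusing palindrome
# lengths already computed inside the current right boundary r. Assuming a
# hypothetical un-obfuscated name `longest_palindromic_substring`:
#
#     longest_palindromic_substring("aba")   # -> "aba"
#     longest_palindromic_substring("cbbd")  # -> "bb"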
| 10 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
__lowerCamelCase = {
"""yjernite/retribert-base-uncased""": 5_12,
}
__lowerCamelCase = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class UpperCAmelCase ( A_ ):
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : List[str] = PRETRAINED_INIT_CONFIGURATION
A__ : Tuple = RetriBertTokenizer
A__ : Tuple = ["input_ids", "attention_mask"]
def __init__(self : List[Any] , snake_case__ : List[str]=None , snake_case__ : Optional[int]=None , snake_case__ : str=True , snake_case__ : Dict="[UNK]" , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[PAD]" , snake_case__ : List[Any]="[CLS]" , snake_case__ : List[str]="[MASK]" , snake_case__ : str=True , snake_case__ : Union[str, Any]=None , **snake_case__ : Optional[int] , ) -> List[Any]:
'''simple docstring'''
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
snake_case : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars
):
snake_case : Union[str, Any] = getattr(snake_case__ , normalizer_state.pop("type" ) )
snake_case : str = do_lower_case
snake_case : Union[str, Any] = strip_accents
snake_case : Union[str, Any] = tokenize_chinese_chars
snake_case : Dict = normalizer_class(**snake_case__ )
snake_case : Any = do_lower_case
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] , snake_case__ : List[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : Union[str, Any] = [self.sep_token_id]
snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
snake_case : Optional[Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
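# Illustrative sketch (not part of the original file): the __init__ above
# rebuilds the backend normalizer so that constructor kwargs override the
# state serialized in tokenizer.json. The same pattern in isolation, using
# the `tokenizers` library (the state dict fields are assumptions based on
# a BertNormalizer):
#
#     from tokenizers import normalizers
#
#     state = {"type": "BertNormalizer", "lowercase": True,
#              "strip_accents": None, "handle_chinese_chars": True,
#              "clean_text": True}
#     normalizer_class = getattr(normalizers, state.pop("type"))
#     state["lowercase"] = False  # the overriding constructor kwarg
#     backend_normalizer = normalizer_class(**state)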
| 365 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__lowerCamelCase = Mapping[str, np.ndarray]
__lowerCamelCase = Mapping[str, Any] # Is a nested dict.
__lowerCamelCase = 0.01
@dataclasses.dataclass(frozen=A_ )
class UpperCAmelCase :
A__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
A__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
A__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
A__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
A__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
A__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
A__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
A__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
A__ : Optional[Sequence[int]] = None
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Dict = r"(\[[A-Z]+\]\n)"
snake_case : List[str] = [tag.strip() for tag in re.split(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) > 0]
snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
snake_case : List[str] = ["N", "CA", "C"]
snake_case : str = None
snake_case : str = None
snake_case : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
snake_case : Optional[Any] = "X" # FIXME: strings are immutable
snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(__lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowerCamelCase , g[1][axis].split() ) ) )
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
snake_case : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
snake_case : List[str] = np.zeros(
(
len(__lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCamelCase , atom_mask=__lowerCamelCase , aatype=__lowerCamelCase , residue_index=np.arange(len(__lowerCamelCase ) ) , b_factors=__lowerCamelCase , )
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : int = 0 ):
snake_case : List[str] = []
snake_case : str = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
snake_case : Union[str, Any] = prot.parents
snake_case : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
snake_case : Tuple = [p for i, p in zip(__lowerCamelCase , __lowerCamelCase ) if i == chain_id]
if parents is None or len(__lowerCamelCase ) == 0:
snake_case : int = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(__lowerCamelCase )}""" )
return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowerCamelCase ) , [] )
parent_dict[str(__lowerCamelCase )].append(__lowerCamelCase )
snake_case : List[str] = max([int(__lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
snake_case : Optional[Any] = parent_dict.get(str(__lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(__lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(__lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCamelCase ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_atoa(__lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.intaa )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
pdb_lines.extend(__lowerCamelCase )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(__lowerCamelCase ):
snake_case : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(__lowerCamelCase ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , __lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( __lowerCamelCase : FeatureDict , __lowerCamelCase : ModelOutput , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Sequence[str]] = None , __lowerCamelCase : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowerCamelCase , remark=__lowerCamelCase , parents=__lowerCamelCase , parents_chain_index=__lowerCamelCase , )
| 10 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class UpperCAmelCase ( A_ ):
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : Optional[Any] = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
snake_case : Tuple = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> int:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
snake_case : Optional[int] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case : Tuple = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case : Dict = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[Any]:
'''simple docstring'''
import PIL.Image
snake_case : List[str] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=snake_case__ ) as mock_cast_to_python_objects:
snake_case : str = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) )
snake_case : Optional[Any] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , snake_case__ )
self.assertFalse(kwargs["optimize_list_casting"] )
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int ):
snake_case : Union[str, Any] = pa.BufferReader(__lowerCamelCase ) if isinstance(__lowerCamelCase , pa.Buffer ) else pa.memory_map(__lowerCamelCase )
snake_case : str = pa.ipc.open_stream(__lowerCamelCase )
snake_case : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
snake_case : Union[str, Any] = pa.BufferOutputStream()
snake_case : List[Any] = pa.schema(__lowerCamelCase ) if fields else None
with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
snake_case : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case : Any = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCamelCase ( ):
snake_case : str = pa.BufferOutputStream()
snake_case : List[Any] = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=__lowerCamelCase , features=__lowerCamelCase ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
snake_case : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
snake_case : List[Any] = pa.BufferReader(output.getvalue() )
snake_case : Optional[int] = pa.ipc.open_stream(__lowerCamelCase )
snake_case : pa.Table = f.read_all()
snake_case : Union[str, Any] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__lowerCamelCase )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def UpperCamelCase ( __lowerCamelCase : Tuple ):
snake_case : str = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCamelCase , writer_batch_size=__lowerCamelCase , hash_salt="split_name" , check_duplicates=__lowerCamelCase , ) as writer:
with pytest.raises(__lowerCamelCase ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
snake_case : Optional[int] = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Optional[Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCamelCase , writer_batch_size=__lowerCamelCase , hash_salt="split_name" , check_duplicates=__lowerCamelCase , ) as writer:
with pytest.raises(__lowerCamelCase ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
snake_case : Dict = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def UpperCamelCase ( __lowerCamelCase : Optional[Any] ):
snake_case : Dict = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCamelCase , writer_batch_size=__lowerCamelCase , hash_salt="split_name" , check_duplicates=__lowerCamelCase , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
snake_case : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ):
snake_case : int = pa.BufferOutputStream()
snake_case : List[Any] = pa.schema(__lowerCamelCase ) if fields else None
with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
snake_case : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case : Dict = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ):
snake_case : Dict = pa.BufferOutputStream()
snake_case : List[str] = pa.schema(__lowerCamelCase ) if fields else None
with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
snake_case : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case : Optional[int] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
snake_case : Optional[int] = pa.BufferOutputStream()
snake_case : List[str] = pa.schema(__lowerCamelCase ) if fields else None
with ArrowWriter(stream=__lowerCamelCase , schema=__lowerCamelCase , writer_batch_size=__lowerCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
snake_case : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case : List[Any] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCamelCase ( ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : Union[str, Any] = {"col_1": pa.string(), "col_2": pa.intaa()}
snake_case : Union[str, Any] = os.path.join(__lowerCamelCase , "test.arrow" )
with ArrowWriter(path=__lowerCamelCase , schema=pa.schema(__lowerCamelCase ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
snake_case : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__lowerCamelCase , metadata=writer._schema.metadata )
_check_output(__lowerCamelCase , 1 )
def UpperCamelCase ( __lowerCamelCase : int ):
if pa.types.is_list(__lowerCamelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str ):
if isinstance(lst[0] , __lowerCamelCase ):
change_first_primitive_element_in_list(lst[0] , __lowerCamelCase )
else:
snake_case : List[str] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ):
snake_case : Any = pa.array(TypedSequence(__lowerCamelCase , optimized_int_type=__lowerCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
# in range
snake_case : Tuple = pa.array(OptimizedTypedSequence(__lowerCamelCase , col=__lowerCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
snake_case : Union[str, Any] = copy.deepcopy(__lowerCamelCase )
snake_case : Union[str, Any] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__lowerCamelCase , __lowerCamelCase )
snake_case : Any = pa.array(OptimizedTypedSequence(__lowerCamelCase , col=__lowerCamelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
snake_case : List[Any] = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=__lowerCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] ):
snake_case : int = "mock://dataset-train.arrow"
with ArrowWriter(path=__lowerCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__lowerCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
snake_case : Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__lowerCamelCase )
def UpperCamelCase ( ):
snake_case : Optional[int] = pa.BufferOutputStream()
with ParquetWriter(stream=__lowerCamelCase ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
snake_case : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
snake_case : Optional[Any] = pa.BufferReader(output.getvalue() )
snake_case : pa.Table = pq.read_table(__lowerCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str ):
import PIL.Image
snake_case : Dict = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__lowerCamelCase , format="png" )
snake_case : Union[str, Any] = pa.BufferOutputStream()
with ParquetWriter(
stream=__lowerCamelCase , features=Features({"image": Image()} ) , embed_local_files=__lowerCamelCase ) as writer:
writer.write({"image": image_path} )
writer.finalize()
snake_case : str = pa.BufferReader(output.getvalue() )
snake_case : pa.Table = pq.read_table(__lowerCamelCase )
snake_case : Tuple = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , __lowerCamelCase )
with open(__lowerCamelCase , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def UpperCamelCase ( ):
snake_case : List[Any] = pa.schema([pa.field("col_1" , pa.string() , nullable=__lowerCamelCase )] )
snake_case : List[str] = pa.BufferOutputStream()
with ArrowWriter(stream=__lowerCamelCase ) as writer:
writer._build_writer(inferred_schema=__lowerCamelCase )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
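# Illustrative sketch (not part of the original tests): TypedSequence lets
# pa.array() either infer a type, attempt one (falling back on failure), or
# enforce one (raising on failure), exactly as exercised by the tests above:
#
#     import pyarrow as pa
#     from datasets.arrow_writer import TypedSequence
#     from datasets.features import Value
#
#     pa.array(TypedSequence([1, 2, 3])).type                       # inferred int64
#     pa.array(TypedSequence(["a"], try_type=Value("int64"))).type  # falls back to string
#     pa.array(TypedSequence(["a"], type=Value("int64")))           # raises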
| 366 |
from __future__ import annotations
__lowerCamelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCAmelCase :
def __init__(self : Tuple , snake_case__ : dict[str, list[str]] , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : str = graph
# mapping node to its parent in resulting breadth first tree
snake_case : dict[str, str | None] = {}
snake_case : Union[str, Any] = source_vertex
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> None:
'''simple docstring'''
snake_case : Any = {self.source_vertex}
snake_case : str = None
snake_case : List[str] = [self.source_vertex] # first in first out queue
while queue:
snake_case : List[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case__ )
snake_case : Any = vertex
queue.append(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str ) -> str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
snake_case : str = self.parent.get(snake_case__ )
if target_vertex_parent is None:
snake_case : Optional[Any] = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(snake_case__ )
return self.shortest_path(snake_case__ ) + f"""->{target_vertex}"""
if __name__ == "__main__":
__lowerCamelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int ):
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
snake_case : Any = str(bin(__lowerCamelCase ) )
binary_number += "0" * shift_amount
return binary_number
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int ):
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
snake_case : Any = str(bin(__lowerCamelCase ) )[2:]
if shift_amount >= len(__lowerCamelCase ):
return "0b0"
snake_case : Tuple = binary_number[: len(__lowerCamelCase ) - shift_amount]
return "0b" + shifted_binary_number
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int ):
if number >= 0: # Get binary representation of non-negative number
snake_case : List[str] = "0" + str(bin(__lowerCamelCase ) ).strip("-" )[2:]
else: # Get binary (2's complement) representation of negative number
snake_case : Optional[Any] = len(bin(__lowerCamelCase )[3:] ) # Find 2's complement of number
snake_case : Dict = bin(abs(__lowerCamelCase ) - (1 << binary_number_length) )[3:]
snake_case : Optional[int] = (
"1" + "0" * (binary_number_length - len(__lowerCamelCase )) + binary_number
)
if shift_amount >= len(__lowerCamelCase ):
return "0b" + binary_number[0] * len(__lowerCamelCase )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__lowerCamelCase ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
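# Illustrative examples (not part of the original file); the un-obfuscated
# function names below are assumptions:
#
#     logical_left_shift(1, 2)       # -> "0b100"
#     logical_right_shift(8, 2)      # -> "0b10"
#     arithmetic_right_shift(-8, 2)  # -> "0b11110", i.e. -8 >> 2 == -2 in
#                                    #    5-bit two's complement (the sign bit
#                                    #    is replicated into the vacated spots)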
| 367 |
from __future__ import annotations
def UpperCamelCase ( __lowerCamelCase : list[int] ):
snake_case : Optional[int] = len(__lowerCamelCase ) // 2
# choose the middle 3 elements
snake_case : str = lst[m - 1 : m + 2]
# if the middle element is a peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
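# Illustrative note (not part of the original file): this is a binary search
# for a peak, O(log n) for a list that strictly increases and then strictly
# decreases. The recursive calls above use the original name `peak`:
#
#     peak([1, 2, 3, 4, 5, 4, 3, 2, 1])  # -> 5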
| 10 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def UpperCamelCase ( __lowerCamelCase : Optional[Any] ):
snake_case : Tuple = torch.exp(__lowerCamelCase )
snake_case : Union[str, Any] = torch.sum(__lowerCamelCase , dim=1 ) # sum of exp(x_i)
snake_case : Tuple = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__lowerCamelCase ) - B / A
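# Illustrative note (not part of the original file): with p = softmax(x), the
# function above computes the per-row Shannon entropy via the identity
#     H(p) = -sum_i p_i * log(p_i)
#          = log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i))
# i.e. torch.log(A) - B / A with A = sum(exp(x)) and B = sum(x * exp(x)).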
class UpperCAmelCase ( nn.Module ):
def __init__(self : Union[str, Any] , snake_case__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case : Tuple = config.output_attentions
snake_case : Tuple = config.output_hidden_states
snake_case : Optional[int] = nn.ModuleList([BertLayer(snake_case__ ) for _ in range(config.num_hidden_layers )] )
snake_case : Any = nn.ModuleList([BertHighway(snake_case__ ) for _ in range(config.num_hidden_layers )] )
snake_case : Tuple = [-1 for _ in range(config.num_hidden_layers )]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Any ) -> Union[str, Any]:
'''simple docstring'''
if (type(snake_case__ ) is float) or (type(snake_case__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case : List[str] = x
else:
snake_case : Any = x
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : Any = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Dict , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Tuple=None , snake_case__ : List[str]=None , ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = ()
snake_case : List[str] = ()
snake_case : Union[str, Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case : Union[str, Any] = all_hidden_states + (hidden_states,)
snake_case : Union[str, Any] = layer_module(
snake_case__ , snake_case__ , head_mask[i] , snake_case__ , snake_case__ )
snake_case : Optional[Any] = layer_outputs[0]
if self.output_attentions:
snake_case : Union[str, Any] = all_attentions + (layer_outputs[1],)
snake_case : str = (hidden_states,)
if self.output_hidden_states:
snake_case : Optional[int] = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case : Optional[Any] = current_outputs + (all_attentions,)
snake_case : Any = self.highway[i](snake_case__ )
# logits, pooled_output
if not self.training:
snake_case : int = highway_exit[0]
snake_case : List[str] = entropy(snake_case__ )
snake_case : List[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case : List[str] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case : Optional[int] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(snake_case__ , i + 1 )
else:
snake_case : List[Any] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case : List[str] = all_hidden_states + (hidden_states,)
snake_case : Any = (hidden_states,)
if self.output_hidden_states:
snake_case : Any = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case : List[str] = outputs + (all_attentions,)
snake_case : List[Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " ,A_ ,)
class UpperCAmelCase ( A_ ):
def __init__(self : Any , snake_case__ : Any ) -> List[str]:
'''simple docstring'''
super().__init__(snake_case__ )
snake_case : List[Any] = config
snake_case : Optional[int] = BertEmbeddings(snake_case__ )
snake_case : Tuple = DeeBertEncoder(snake_case__ )
snake_case : Any = BertPooler(snake_case__ )
self.init_weights()
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> int:
'''simple docstring'''
return self.embeddings.word_embeddings
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : str ) -> Tuple:
'''simple docstring'''
snake_case : Tuple = value
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] ) -> Dict:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(snake_case__ )
@add_start_docstrings_to_model_forward(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Dict=None , snake_case__ : Tuple=None , snake_case__ : str=None , snake_case__ : Any=None , snake_case__ : Any=None , snake_case__ : List[str]=None , snake_case__ : int=None , snake_case__ : Dict=None , ) -> Any:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
snake_case : Optional[Any] = input_ids.size()
elif inputs_embeds is not None:
snake_case : List[Any] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
snake_case : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case : Optional[int] = torch.ones(snake_case__ , device=snake_case__ )
if encoder_attention_mask is None:
snake_case : Union[str, Any] = torch.ones(snake_case__ , device=snake_case__ )
if token_type_ids is None:
snake_case : int = torch.zeros(snake_case__ , dtype=torch.long , device=snake_case__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case : torch.Tensor = self.get_extended_attention_mask(snake_case__ , snake_case__ , snake_case__ )
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case : Any = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case : Union[str, Any] = encoder_attention_mask[:, None, None, :]
snake_case : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case : Dict = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case : Any = self.get_head_mask(snake_case__ , self.config.num_hidden_layers )
snake_case : int = self.embeddings(
input_ids=snake_case__ , position_ids=snake_case__ , token_type_ids=snake_case__ , inputs_embeds=snake_case__ )
snake_case : int = self.encoder(
snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
snake_case : List[Any] = encoder_outputs[0]
snake_case : List[Any] = self.pooler(snake_case__ )
snake_case : int = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class UpperCAmelCase ( A_ ):
def __init__(self : Optional[int] , snake_case__ : Dict , snake_case__ : Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case : Any = message
snake_case : Any = exit_layer # start from 1!
class UpperCAmelCase ( nn.Module ):
def __init__(self : Dict , snake_case__ : Optional[Any] ) -> int:
'''simple docstring'''
super().__init__()
snake_case : Dict = BertPooler(snake_case__ )
snake_case : Tuple = nn.Dropout(config.hidden_dropout_prob )
snake_case : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Dict ) -> int:
'''simple docstring'''
snake_case : str = encoder_outputs[0]
snake_case : Union[str, Any] = self.pooler(snake_case__ )
# "return" pooler_output
# BertModel
snake_case : int = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case : Union[str, Any] = bmodel_output[1]
snake_case : Optional[int] = self.dropout(snake_case__ )
snake_case : str = self.classifier(snake_case__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " ,A_ ,)
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , snake_case__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(snake_case__ )
snake_case : Any = config.num_labels
snake_case : int = config.num_hidden_layers
snake_case : Dict = DeeBertModel(snake_case__ )
snake_case : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case : int = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : Dict=None , snake_case__ : List[Any]=None , snake_case__ : List[Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[Any]=-1 , snake_case__ : Optional[int]=False , ) -> List[str]:
'''simple docstring'''
snake_case : List[str] = self.num_layers
try:
snake_case : Optional[Any] = self.bert(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , position_ids=snake_case__ , head_mask=snake_case__ , inputs_embeds=snake_case__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case : List[str] = outputs[1]
snake_case : List[str] = self.dropout(snake_case__ )
snake_case : Tuple = self.classifier(snake_case__ )
snake_case : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case : Tuple = e.message
snake_case : Optional[int] = e.exit_layer
snake_case : Tuple = outputs[0]
if not self.training:
snake_case : Optional[int] = entropy(snake_case__ )
snake_case : str = []
snake_case : int = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case : Dict = MSELoss()
snake_case : List[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case : Union[str, Any] = CrossEntropyLoss()
snake_case : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case : str = []
for highway_exit in outputs[-1]:
snake_case : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(snake_case__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case : List[str] = MSELoss()
snake_case : List[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case : str = CrossEntropyLoss()
snake_case : int = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(snake_case__ )
if train_highway:
snake_case : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case : str = (loss,) + outputs
if not self.training:
snake_case : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case : Optional[int] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
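# Illustrative sketch (not part of the original file): at inference time an
# early exit fires when a highway head's entropy drops below its per-layer
# threshold and surfaces as a HighwayException. Assuming the setter above
# keeps its original DeeBERT name `set_early_exit_entropy`:
#
#     model.bert.encoder.set_early_exit_entropy(0.5)
#     try:
#         outputs = model.bert(input_ids)
#     except HighwayException as e:
#         outputs, exit_layer = e.message, e.exit_layer  # exit_layer starts at 1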
| 368 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = """."""
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
__lowerCamelCase = line.strip()
__lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = """\n""".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 10 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__lowerCamelCase = random.Random()
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str]=1.0 , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None ):
if rng is None:
snake_case : Optional[Any] = global_rng
snake_case : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase ( unittest.TestCase ):
def __init__(self : Dict , snake_case__ : List[str] , snake_case__ : List[str]=7 , snake_case__ : Optional[int]=4_00 , snake_case__ : Optional[int]=20_00 , snake_case__ : Dict=1 , snake_case__ : Dict=0.0 , snake_case__ : Union[str, Any]=1_60_00 , snake_case__ : str=True , snake_case__ : Optional[Any]=True , ) -> Dict:
'''simple docstring'''
snake_case : str = parent
snake_case : List[Any] = batch_size
snake_case : int = min_seq_length
snake_case : Dict = max_seq_length
snake_case : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case : str = feature_size
snake_case : List[Any] = padding_value
snake_case : Union[str, Any] = sampling_rate
snake_case : List[str] = return_attention_mask
snake_case : int = do_normalize
def _SCREAMING_SNAKE_CASE (self : Any ) -> str:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str=False , snake_case__ : Optional[int]=False ) -> Dict:
'''simple docstring'''
def _flatten(snake_case__ : List[str] ):
return list(itertools.chain(*snake_case__ ) )
if equal_length:
snake_case : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
snake_case : int = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case : List[Any] = [np.asarray(snake_case__ ) for x in speech_inputs]
return speech_inputs
class UpperCAmelCase ( A_ ,unittest.TestCase ):
A__ : int = WavaVecaFeatureExtractor
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Tuple:
'''simple docstring'''
snake_case : Any = WavaVecaFeatureExtractionTester(self )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(snake_case__ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case__ , axis=0 ) - 1 ) < 1e-3 ) )
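# Illustrative note (not part of the original tests): the helper above checks
# the per-utterance normalization Wav2Vec2 applies before padding, which in
# isolation is roughly:
#
#     normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)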
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> int:
'''simple docstring'''
snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case : List[str] = [np.asarray(snake_case__ ) for speech_input in speech_inputs]
# Test not batched input
snake_case : Dict = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
snake_case : Any = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
# Test batched
snake_case : Union[str, Any] = feat_extract(snake_case__ , return_tensors="np" ).input_values
snake_case : Any = feat_extract(snake_case__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ):
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case : Any = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
snake_case : Union[str, Any] = np.asarray(snake_case__ )
snake_case : Any = feat_extract(snake_case__ , return_tensors="np" ).input_values
snake_case : List[str] = feat_extract(snake_case__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ):
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case : int = ["longest", "max_length", "do_not_pad"]
snake_case : List[str] = [None, 16_00, None]
for max_length, padding in zip(snake_case__ , snake_case__ ):
snake_case : Optional[Any] = feat_extract(snake_case__ , padding=snake_case__ , max_length=snake_case__ , return_tensors="np" )
snake_case : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[1][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any:
'''simple docstring'''
snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Tuple = range(8_00 , 14_00 , 2_00 )
snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in lengths]
snake_case : Tuple = ["longest", "max_length", "do_not_pad"]
snake_case : Optional[Any] = [None, 16_00, None]
for max_length, padding in zip(snake_case__ , snake_case__ ):
snake_case : Any = feat_extract(snake_case__ , max_length=snake_case__ , padding=snake_case__ )
snake_case : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case : Tuple = feat_extract(
snake_case__ , truncation=snake_case__ , max_length=10_00 , padding="max_length" , return_tensors="np" )
snake_case : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case : Optional[Any] = feat_extract(
snake_case__ , truncation=snake_case__ , max_length=10_00 , padding="longest" , return_tensors="np" )
snake_case : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case : Optional[Any] = feat_extract(
snake_case__ , truncation=snake_case__ , max_length=20_00 , padding="longest" , return_tensors="np" )
snake_case : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
@require_torch
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any:
'''simple docstring'''
import torch
snake_case : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Any = np.random.rand(1_00 ).astype(np.floataa )
snake_case : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case : Union[str, Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[str]:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
snake_case : Any = WavaVecaConfig.from_pretrained(snake_case__ )
snake_case : List[Any] = WavaVecaFeatureExtractor.from_pretrained(snake_case__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == "layer" )
| 369 |
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Tuple ):
snake_case : Optional[Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()]
snake_case : Union[str, Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()][: len(__lowerCamelCase )]
snake_case : List[Any] = calculate_rouge(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
if save_path is not None:
save_json(__lowerCamelCase , __lowerCamelCase , indent=__lowerCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
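# Illustrative usage (not part of the original file), assuming this script is
# saved as rouge_cli.py and both files contain one example per line:
#
#     python rouge_cli.py predictions.txt references.txt --save_path metrics.json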
| 10 | 0 |
from functools import lru_cache
@lru_cache
def UpperCamelCase ( __lowerCamelCase : int ):
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
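# Illustrative note (not part of the original file): @lru_cache memoizes each
# computed value, so the recursion fills the cache bottom-up and later calls
# reuse it. The recursive call above uses the original name `factorial`:
#
#     factorial(5)  # -> 120; factorial(0)..factorial(4) are now cached too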
| 370 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
for attribute in key.split("." ):
snake_case : Tuple = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
snake_case : int = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
snake_case : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case : Dict = value
elif weight_type == "weight_g":
snake_case : Optional[int] = value
elif weight_type == "weight_v":
snake_case : Optional[int] = value
elif weight_type == "bias":
snake_case : Tuple = value
else:
snake_case : Optional[int] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] ):
snake_case : int = []
snake_case : List[Any] = fairseq_model.state_dict()
snake_case : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case : List[str] = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
snake_case : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case : Tuple = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case : Tuple = True
if "*" in mapped_key:
snake_case : Union[str, Any] = name.split(__lowerCamelCase )[0].split("." )[-2]
snake_case : Any = mapped_key.replace("*" , __lowerCamelCase )
if "weight_g" in name:
snake_case : Optional[int] = "weight_g"
elif "weight_v" in name:
snake_case : Tuple = "weight_v"
elif "bias" in name:
snake_case : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case : str = "weight"
else:
snake_case : str = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
snake_case : str = full_name.split("conv_layers." )[-1]
snake_case : int = name.split("." )
snake_case : Optional[int] = int(items[0] )
snake_case : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=True ):
if config_path is not None:
snake_case : str = UniSpeechSatConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : str = UniSpeechSatConfig()
snake_case : Tuple = ""
if is_finetuned:
snake_case : Tuple = UniSpeechSatForCTC(__lowerCamelCase )
else:
snake_case : List[Any] = UniSpeechSatForPreTraining(__lowerCamelCase )
snake_case , snake_case , snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case : Dict = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
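# Hedged usage sketch: a hypothetical invocation of this conversion script (both the
# script name and the file paths below are placeholders, not real artifacts):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path unispeech_sat.pt \
#       --dict_path dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf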
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
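# Hedged, stand-alone imitation of the `_LazyModule` pattern used above: exported
# attributes are resolved to their submodule only on first access, keeping the initial
# `import` cheap. This sketches the idea and is not transformers' actual implementation.
import importlib
import types

class _SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # imported lazily, and only the one submodule that is actually needed
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)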
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""}
__lowerCamelCase = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Dict = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
snake_case : Any = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
snake_case : List[Any] = token.rstrip("\n" )
snake_case : int = index
return vocab
class UpperCAmelCase ( A_ ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = ["input_ids", "attention_mask"]
def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None:
'''simple docstring'''
snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case : Dict = f"""[unused{i}]"""
snake_case : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case : Dict = 12
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__(self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.__dict__.copy()
snake_case : Tuple = None
return state
def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
snake_case : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
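# Hedged usage sketch (requires network access and the `sentencepiece` package; the
# model id comes from PRETRAINED_VOCAB_FILES_MAP above):
#
#   from transformers import XLMProphetNetTokenizer
#   tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
#   ids = tok("Hello world").input_ids   # a single sequence ends with one [SEP] id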
| 10 | 0 |
import numpy as np
def UpperCamelCase ( __lowerCamelCase : np.array ):
return 1 / (1 + np.exp(-vector ))
def UpperCamelCase ( __lowerCamelCase : np.array ):
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
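# Hedged note: `vector * sigmoid(1.702 * vector)` is the sigmoid approximation of
# GELU; the exact form uses the Gaussian CDF. A quick numerical check of how close
# the approximation stays on a small grid:
import numpy as np
from math import erf, sqrt

_x = np.linspace(-3.0, 3.0, 61)
_approx = _x * (1.0 / (1.0 + np.exp(-1.702 * _x)))
_exact = np.array([v * 0.5 * (1.0 + erf(v / sqrt(2.0))) for v in _x])
assert np.max(np.abs(_approx - _exact)) < 0.05  # close, but not exact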
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCamelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
__lowerCamelCase = {
"""facebook/xglm-564M""": 20_48,
}
class UpperCAmelCase ( A_ ):
A__ : Any = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : str , snake_case__ : Optional[Any] , snake_case__ : List[str]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Dict="</s>" , snake_case__ : Any="<s>" , snake_case__ : str="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Any , ) -> None:
'''simple docstring'''
snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case : Optional[int] = 7
snake_case : List[str] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : int = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case : Tuple = len(self.sp_model )
snake_case : Any = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case__ )
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = self.__dict__.copy()
snake_case : str = None
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Dict , snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case : Tuple = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ ))
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : List[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
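# Hedged, self-contained illustration of the id bookkeeping above: spm returns 0 for
# unknown pieces, and every regular piece is shifted by `fairseq_offset` so that the
# four fairseq specials keep ids 0-3.
fairseq_offset = 1

def _spm_id_to_fairseq_id(spm_id: int, unk_token_id: int = 3) -> int:
    # mirrors `spm_id + self.fairseq_offset if spm_id else self.unk_token_id`
    return spm_id + fairseq_offset if spm_id else unk_token_id

assert _spm_id_to_fairseq_id(3) == 4  # e.g. "," : spm id 3 -> fairseq id 4
assert _spm_id_to_fairseq_id(0) == 3  # unknown piece -> <unk>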
| 10 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : int ):
snake_case : List[str] = nn.functional.normalize(__lowerCamelCase )
snake_case : Dict = nn.functional.normalize(__lowerCamelCase )
return torch.mm(__lowerCamelCase , normalized_text_embeds.t() )
class UpperCAmelCase ( A_ ):
A__ : int = CLIPConfig
A__ : List[Any] = ["CLIPEncoderLayer"]
def __init__(self : List[str] , snake_case__ : CLIPConfig ) -> Optional[Any]:
'''simple docstring'''
super().__init__(snake_case__ )
snake_case : Any = CLIPVisionModel(config.vision_config )
snake_case : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case__ )
snake_case : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case__ )
snake_case : Optional[Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case__ )
snake_case : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case__ )
snake_case : str = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case__ )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ) -> str:
'''simple docstring'''
snake_case : str = self.vision_model(snake_case__ )[1] # pooled_output
snake_case : List[str] = self.visual_projection(snake_case__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case : Union[str, Any] = cosine_distance(snake_case__ , self.special_care_embeds ).cpu().float().numpy()
snake_case : List[Any] = cosine_distance(snake_case__ , self.concept_embeds ).cpu().float().numpy()
snake_case : int = []
snake_case : Optional[int] = image_embeds.shape[0]
for i in range(snake_case__ ):
snake_case : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case : Union[str, Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case : List[str] = special_cos_dist[i][concept_idx]
snake_case : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case : List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
snake_case : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case : Optional[Any] = cos_dist[i][concept_idx]
snake_case : Union[str, Any] = self.concept_embeds_weights[concept_idx].item()
snake_case : Optional[int] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(snake_case__ )
result.append(snake_case__ )
snake_case : Union[str, Any] = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = self.vision_model(snake_case__ )[1] # pooled_output
snake_case : Optional[Any] = self.visual_projection(snake_case__ )
snake_case : Any = cosine_distance(snake_case__ , self.special_care_embeds )
snake_case : Any = cosine_distance(snake_case__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case : int = 0.0
snake_case : Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case : Union[str, Any] = torch.any(special_scores > 0 , dim=1 )
snake_case : Tuple = special_care * 0.01
snake_case : Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case : Dict = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case : Tuple = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
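# Hedged, framework-free sketch of the scoring rule implemented above: embeddings are
# L2-normalised so a matrix product yields cosine similarity, and an image is flagged
# when any (similarity - per-concept threshold + adjustment) is positive.
import numpy as np

def _flag_images(image_embeds, concept_embeds, thresholds, adjustment=0.0):
    img = image_embeds / np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    con = concept_embeds / np.linalg.norm(concept_embeds, axis=-1, keepdims=True)
    scores = img @ con.T - thresholds + adjustment
    return np.any(scores > 0, axis=1)

_rng = np.random.default_rng(0)
_flags = _flag_images(_rng.normal(size=(2, 8)), _rng.normal(size=(3, 8)), np.full(3, 0.5))
assert _flags.shape == (2,)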
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
A__ : int = ["pixel_values"]
def __init__(self : Tuple , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : int = 8 , **snake_case__ : Dict , ) -> None:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : int = do_rescale
snake_case : List[str] = rescale_factor
snake_case : Optional[Any] = do_pad
snake_case : Dict = pad_size
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : np.ndarray , snake_case__ : float , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[str] ) -> np.ndarray:
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : Optional[Union[str, ChannelDimension]] = None ) -> Dict:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = get_image_size(snake_case__ )
snake_case : str = (old_height // size + 1) * size - old_height
snake_case : List[str] = (old_width // size + 1) * size - old_width
return pad(snake_case__ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case__ : List[Any] , ) -> Tuple:
'''simple docstring'''
snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Optional[Any] = do_pad if do_pad is not None else self.do_pad
snake_case : Dict = pad_size if pad_size is not None else self.pad_size
snake_case : Union[str, Any] = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
snake_case : str = [to_numpy_array(snake_case__ ) for image in images]
if do_rescale:
snake_case : str = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_pad:
snake_case : List[Any] = [self.pad(snake_case__ , size=snake_case__ ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
snake_case : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
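# Hedged check of the padding arithmetic above: each spatial dimension grows to the
# next multiple of `size`, and a dimension that is already a multiple still gains a
# full extra block, matching `(old // size + 1) * size - old`.
def _pad_amount(old: int, size: int) -> int:
    return (old // size + 1) * size - old

assert _pad_amount(17, 8) == 7   # 17 -> 24
assert _pad_amount(16, 8) == 8   # 16 -> 24: already a multiple, still padded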
| 10 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class UpperCAmelCase ( A_ ):
A__ : str = "audio-spectrogram-transformer"
def __init__(self : List[str] , snake_case__ : int=7_68 , snake_case__ : Optional[Any]=12 , snake_case__ : int=12 , snake_case__ : List[str]=30_72 , snake_case__ : str="gelu" , snake_case__ : List[str]=0.0 , snake_case__ : List[Any]=0.0 , snake_case__ : Dict=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Optional[int]=16 , snake_case__ : int=True , snake_case__ : Dict=10 , snake_case__ : List[Any]=10 , snake_case__ : Dict=10_24 , snake_case__ : Tuple=1_28 , **snake_case__ : List[Any] , ) -> Any:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : Optional[int] = hidden_size
snake_case : Any = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : Optional[int] = intermediate_size
snake_case : Tuple = hidden_act
snake_case : str = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : Optional[Any] = layer_norm_eps
snake_case : Dict = patch_size
snake_case : Optional[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : str = time_stride
snake_case : Optional[int] = max_length
snake_case : Optional[int] = num_mel_bins
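# Hedged usage sketch: instantiating the config and overriding the spectrogram
# geometry (the values below are illustrative, not recommended settings):
#
#   from transformers import ASTConfig
#   config = ASTConfig(max_length=1024, num_mel_bins=128, frequency_stride=10, time_stride=10)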
| 352 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
# prepare kernel
    # the kernel size has to be odd
if (ksize % 2) == 0:
snake_case : Tuple = ksize + 1
snake_case : int = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__lowerCamelCase ):
for x in range(__lowerCamelCase ):
# distance from center
snake_case : int = x - ksize // 2
snake_case : Union[str, Any] = y - ksize // 2
# degree to radiant
snake_case : List[str] = theta / 180 * np.pi
snake_case : List[Any] = np.cos(_theta )
snake_case : Dict = np.sin(_theta )
# get kernel x
snake_case : Optional[int] = cos_theta * px + sin_theta * py
# get kernel y
snake_case : str = -sin_theta * px + cos_theta * py
# fill kernel
snake_case : Any = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__lowerCamelCase = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__lowerCamelCase = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__lowerCamelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__lowerCamelCase = out / out.max() * 2_55
__lowerCamelCase = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
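# Hedged usage note (standard cv2/numpy names assumed): because the builder bumps an
# even `ksize` up to the next odd value, `gabor_filter_kernel(10, 8, 0, 10, 0, 0)`
# would return an 11x11 float32 array; a larger `ksize` trades speed for a wider
# receptive field.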
| 10 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase :
def __init__(self : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[str]=13 , snake_case__ : Optional[int]=7 , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Tuple=True , snake_case__ : Dict=True , snake_case__ : Optional[Any]=99 , snake_case__ : Dict=32 , snake_case__ : List[str]=2 , snake_case__ : List[str]=4 , snake_case__ : Tuple=37 , snake_case__ : List[str]="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : int=5_12 , snake_case__ : Optional[Any]=16 , snake_case__ : Optional[int]=2 , snake_case__ : Optional[int]=0.02 , snake_case__ : Optional[int]=3 , snake_case__ : str=4 , snake_case__ : Union[str, Any]=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Any = parent
snake_case : str = 13
snake_case : str = 7
snake_case : Optional[Any] = True
snake_case : Tuple = True
snake_case : Optional[Any] = True
snake_case : Optional[Any] = True
snake_case : List[Any] = 99
snake_case : Any = 32
snake_case : int = 2
snake_case : Optional[int] = 4
snake_case : str = 37
snake_case : Tuple = "gelu"
snake_case : Optional[int] = 0.1
snake_case : Tuple = 0.1
snake_case : Union[str, Any] = 5_12
snake_case : Optional[Any] = 16
snake_case : Optional[Any] = 2
snake_case : Optional[Any] = 0.02
snake_case : Any = 3
snake_case : Optional[Any] = 4
snake_case : Union[str, Any] = None
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Dict = None
if self.use_input_mask:
snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Any = None
if self.use_token_type_ids:
snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : Union[str, Any] = None
snake_case : List[str] = None
snake_case : int = None
if self.use_labels:
snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : str = ids_tensor([self.batch_size] , self.num_choices )
snake_case : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = TFRoFormerModel(config=snake_case__ )
snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case : List[str] = [input_ids, input_mask]
snake_case : Optional[int] = model(snake_case__ )
snake_case : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : str , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : Tuple = TFRoFormerForCausalLM(config=snake_case__ )
snake_case : int = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case : List[str] = model(snake_case__ )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = TFRoFormerForMaskedLM(config=snake_case__ )
snake_case : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ) -> Tuple:
'''simple docstring'''
snake_case : str = self.num_labels
snake_case : Dict = TFRoFormerForSequenceClassification(config=snake_case__ )
snake_case : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case : Dict = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case : str = self.num_choices
snake_case : List[Any] = TFRoFormerForMultipleChoice(config=snake_case__ )
snake_case : Tuple = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
snake_case : Optional[Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
snake_case : int = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
snake_case : Any = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
snake_case : Tuple = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = self.num_labels
snake_case : str = TFRoFormerForTokenClassification(config=snake_case__ )
snake_case : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Any , snake_case__ : int , snake_case__ : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : Any = TFRoFormerForQuestionAnswering(config=snake_case__ )
snake_case : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case : int = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : Optional[Any] = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels = config_and_inputs
snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( A_ ,A_ ,unittest.TestCase ):
A__ : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
A__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ : List[Any] = False
A__ : Tuple = False
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : str , snake_case__ : List[Any] ) -> List[Any]:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _SCREAMING_SNAKE_CASE (self : str ) -> Dict:
'''simple docstring'''
snake_case : Dict = TFRoFormerModelTester(self )
snake_case : int = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Dict:
'''simple docstring'''
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[Any]:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
self.assertIsNotNone(snake_case__ )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE (self : str ) -> Dict:
'''simple docstring'''
snake_case : Tuple = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
snake_case : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case : Optional[int] = model(snake_case__ )[0]
# TODO Replace vocab size
snake_case : Optional[Any] = 5_00_00
snake_case : Optional[Any] = [1, 6, vocab_size]
self.assertEqual(output.shape , snake_case__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
snake_case : Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-4 )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
A__ : Dict = 1e-4
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : List[Any] = tf.constant([[4, 10]] )
snake_case : int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
snake_case : List[str] = emba(input_ids.shape )
snake_case : Optional[int] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(snake_case__ , snake_case__ , atol=self.tolerance )
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
snake_case : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
snake_case : int = emba.weight[:3, :5]
tf.debugging.assert_near(snake_case__ , snake_case__ , atol=self.tolerance )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
A__ : str = 1e-4
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Tuple:
'''simple docstring'''
snake_case : Tuple = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
snake_case : Union[str, Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
snake_case : List[str] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
snake_case : Optional[Any] = embed_positions([2, 16, 7_68] )[None, None, :, :]
snake_case : Tuple = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
snake_case__ , snake_case__ , snake_case__ )
snake_case : List[str] = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
snake_case : Optional[Any] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , snake_case__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , snake_case__ , atol=self.tolerance )
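# Hedged, framework-free sketch of the rotary trick exercised above: channel pairs are
# rotated by a position-dependent angle, i.e. rotated = x*cos + rotate_every_two(x)*sin.
# This follows one common RoFormer formulation and is not claimed to be byte-identical
# to TFRoFormerSelfAttention.apply_rotary_position_embeddings.
import numpy as np

def _rotate_every_two(x):
    # (x1, x2, x3, x4, ...) -> (-x2, x1, -x4, x3, ...)
    x1, x2 = x[..., ::2], x[..., 1::2]
    return np.stack([-x2, x1], axis=-1).reshape(x.shape)

def _apply_rotary(x, sinusoidal):
    # sinusoidal: [seq, dim], first half sin, second half cos (as produced by the
    # sinusoidal positional embedding); repeat each half to line up with channel pairs
    sin, cos = np.split(sinusoidal, 2, axis=-1)
    sin, cos = np.repeat(sin, 2, axis=-1), np.repeat(cos, 2, axis=-1)
    return x * cos + _rotate_every_two(x) * sin

_x = np.random.rand(2, 12, 16, 64).astype(np.float32)
_emb = np.random.rand(16, 64).astype(np.float32)
assert _apply_rotary(_x, _emb).shape == _x.shape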
| 353 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCAmelCase :
def __init__(self : Dict , snake_case__ : Any , snake_case__ : Tuple=99 , snake_case__ : Tuple=13 , snake_case__ : int=16 , snake_case__ : Tuple=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=2 , snake_case__ : List[Any]=32 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : int=30 , snake_case__ : int=0 , snake_case__ : Tuple=1 , snake_case__ : Optional[Any]=2 , snake_case__ : int=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : Any = decoder_seq_length
# For common tests
snake_case : Any = self.decoder_seq_length
snake_case : Optional[int] = is_training
snake_case : List[str] = use_attention_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : Any = d_model
snake_case : Dict = d_model
snake_case : List[str] = decoder_layers
snake_case : Union[str, Any] = decoder_layers
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_attention_heads
snake_case : Dict = decoder_attention_heads
snake_case : Optional[int] = eos_token_id
snake_case : Dict = bos_token_id
snake_case : List[str] = pad_token_id
snake_case : int = decoder_start_token_id
snake_case : List[Any] = use_cache
snake_case : List[str] = max_position_embeddings
snake_case : Dict = None
snake_case : Union[str, Any] = decoder_seq_length
snake_case : Union[str, Any] = 2
snake_case : Union[str, Any] = 1
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_attention_mask:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : List[Any] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval()
snake_case : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case : List[str] = model(snake_case__ , use_cache=snake_case__ )
snake_case : Any = model(snake_case__ )
snake_case : Any = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
snake_case : List[Any] = outputs["past_key_values"]
        # create a hypothetical next token and extend next_input_ids with it
snake_case : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new token to the existing input ids
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : str = model(snake_case__ )["last_hidden_state"]
snake_case : str = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"]
# select random slice
snake_case : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : str = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ )
snake_case : int = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
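# Hedged restatement of the caching contract checked in create_and_check_decoder_model_past
# above, in pseudo-form: a one-token step that reuses `past_key_values` must match a
# full forward pass over the concatenated sequence.
#
#   full   = model(torch.cat([ids, next_token], dim=-1)).last_hidden_state[:, -1]
#   cached = model(next_token, past_key_values=past).last_hidden_state[:, 0]
#   assert torch.allclose(full, cached, atol=1e-3)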
| 10 | 0 |