"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def __A ( a_ :List[str]) -> str:
__a : str = tmp_path / '''file.csv'''
__a : Tuple = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''')
with open(a_ , '''w''') as f:
f.write(a_)
return str(a_)
@pytest.fixture
def __A ( a_ :Dict) -> str:
__a : Any = tmp_path / '''malformed_file.csv'''
__a : str = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''')
with open(a_ , '''w''') as f:
f.write(a_)
return str(a_)
@pytest.fixture
def __A ( a_ :List[str] , a_ :Any) -> Union[str, Any]:
__a : List[str] = tmp_path / '''csv_with_image.csv'''
__a : List[str] = textwrap.dedent(
F"""\
image
{image_file}
""")
with open(a_ , '''w''') as f:
f.write(a_)
return str(a_)
@pytest.fixture
def __A ( a_ :List[Any]) -> str:
__a : int = tmp_path / '''csv_with_label.csv'''
__a : Tuple = textwrap.dedent(
'''\
label
good
bad
good
''')
with open(a_ , '''w''') as f:
f.write(a_)
return str(a_)
@pytest.fixture
def __A ( a_ :Optional[Any]) -> Tuple:
__a : List[str] = tmp_path / '''csv_with_int_list.csv'''
__a : List[Any] = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''')
with open(a_ , '''w''') as f:
f.write(a_)
return str(a_)
def __A ( a_ :Dict , a_ :List[str] , a_ :Optional[Any]) -> Any:
__a : Optional[Any] = Csv()
__a : Tuple = csv._generate_tables([[csv_file, malformed_csv_file]])
with pytest.raises(a_ , match='''Error tokenizing data'''):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(a_) in record.message
for record in caplog.records)
@require_pil
def __A ( a_ :Any) -> str:
with open(a_ , encoding='''utf-8''') as f:
__a : str = f.read().splitlines()[1]
__a : Union[str, Any] = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()}))
__a : Union[str, Any] = csv._generate_tables([[csv_file_with_image]])
__a : Optional[int] = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field('''image''').type == Image()()
__a : List[Any] = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def __A ( a_ :List[Any]) -> List[str]:
with open(a_ , encoding='''utf-8''') as f:
__a : List[str] = f.read().splitlines()[1:]
__a : Optional[int] = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''])}))
__a : List[str] = csv._generate_tables([[csv_file_with_label]])
__a : str = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field('''label''').type == ClassLabel(names=['''good''', '''bad'''])()
__a : Any = pa_table.to_pydict()['''label''']
assert generated_content == [ClassLabel(names=['''good''', '''bad''']).straint(a_) for label in labels]
def __A ( a_ :Tuple) -> str:
__a : Tuple = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda a_: [int(a_) for i in x.split()]})
__a : Any = csv._generate_tables([[csv_file_with_int_list]])
__a : Optional[Any] = pa.concat_tables([table for _, table in generator])
assert pa.types.is_list(pa_table.schema.field('''int_list''').type)
__a : Dict = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]] | 160 |
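# Usage sketch (an illustration, not part of the test suite): the `Csv` builder
# exercised above is what backs `load_dataset("csv", ...)`, so keyword arguments such
# as `features` can be forwarded through `load_dataset`. The file name is hypothetical.
#
#   from datasets import load_dataset
#   dataset = load_dataset("csv", data_files="file.csv", split="train")
#   print(dataset.column_names)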
"""Constants shared across the `diffusers` codebase: file names, endpoints and cache paths."""
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__a = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def A_ ( _lowercase ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
snake_case_ :int = k.replace(_lowercase, _lowercase )
return k
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = DEFAULTS.copy()
cfg_kwargs.update(_lowercase )
snake_case_ :int = PegasusConfig(**_lowercase )
snake_case_ :Union[str, Any] = PegasusForConditionalGeneration(_lowercase )
snake_case_ :Optional[Any] = torch_model.model.state_dict()
snake_case_ :Tuple = {}
for k, v in tf_weights.items():
snake_case_ :Any = rename_state_dict_key(_lowercase )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
snake_case_ :Any = v.T
snake_case_ :List[Any] = torch.tensor(_lowercase, dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
snake_case_ :List[str] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
snake_case_ :str = mapping["""shared.weight"""]
snake_case_ :Any = mapping["""shared.weight"""]
snake_case_ :Optional[int] = {k: torch.zeros_like(_lowercase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**_lowercase )
snake_case_, snake_case_ :Tuple = torch_model.model.load_state_dict(_lowercase, strict=_lowercase )
snake_case_ :List[str] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def A_ ( _lowercase="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
snake_case_ :str = tf.train.list_variables(_lowercase )
snake_case_ :Dict = {}
snake_case_ :str = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(_lowercase, desc="""converting tf checkpoint to dict""" ):
snake_case_ :Optional[int] = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case_ :Optional[int] = tf.train.load_variable(_lowercase, _lowercase )
snake_case_ :str = array
return tf_weights
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = Path(_lowercase ).parent.name
snake_case_ :Any = task_specific_params[f"""summarization_{dataset}"""]["""max_position_embeddings"""]
snake_case_ :List[Any] = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""", model_max_length=_lowercase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_lowercase )
# convert model
snake_case_ :int = get_tf_weights_as_numpy(_lowercase )
snake_case_ :str = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
snake_case_ :List[Any] = task_specific_params
snake_case_ :Dict = convert_pegasus(_lowercase, _lowercase )
torch_model.save_pretrained(_lowercase )
snake_case_ :Union[str, Any] = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(_lowercase, Path(_lowercase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__a = parser.parse_args()
if args.save_dir is None:
__a = Path(args.tf_ckpt_path).parent.name
__a = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
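# Quick sanity check for the key-renaming rules above (an illustrative key, not taken
# from a real checkpoint): PATTERNS is applied left-to-right with plain string
# replacement, so for example:
#
#   rename_state_dict_key("encoder/memory_attention/output_proj/kernel")
#   # -> "encoder.encoder_attn.out_proj.weight"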
"""Convert a ParlAI Blenderbot checkpoint to the Hugging Face Transformers format."""
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ['attention', 'attn'],
    ['encoder_attention', 'encoder_attn'],
    ['q_lin', 'q_proj'],
    ['k_lin', 'k_proj'],
    ['v_lin', 'v_proj'],
    ['out_lin', 'out_proj'],
    ['norm_embeddings', 'layernorm_embedding'],
    ['position_embeddings', 'embed_positions'],
    ['embeddings', 'embed_tokens'],
    ['ffn.lin', 'fc'],
]


def rename_state_dict_key(k: str) -> str:
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd: dict) -> None:
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ['START']


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str, config_json_path: str) -> None:
    """Copy/rename the ParlAI weights into a freshly initialized Blenderbot model and save it."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
    parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
    parser.add_argument(
        '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
"""Entry point for the `diffusers-cli` command-line tool."""
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main() -> None:
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
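# Usage sketch (assumes the package is installed with its console-script entry point):
#
#   $ diffusers-cli env       # prints environment info, useful when filing bug reports
#   $ diffusers-cli --help    # lists the registered subcommands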
"""Lazy import structure for the (deprecated) M-CTC-T model."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import numpy as np
def a__ ( SCREAMING_SNAKE_CASE : np.array ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def a__ ( SCREAMING_SNAKE_CASE : np.array ):
'''simple docstring'''
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
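# Illustrative values (rounded; an informal check, not part of the module): the
# 1.702-sigmoid curve tracks the exact GELU x * Phi(x) closely near the origin.
#
#   x = np.array([-1.0, 0.0, 1.0])
#   gaussian_error_linear_unit(x)   # ~[-0.154, 0.0, 0.846]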
"""Helpers to run `map_nested` in parallel, either with multiprocessing or a joblib backend."""
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` in parallel, dispatching on the configured backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bars are not supported here: tqdm cannot easily be applied to joblib
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Context manager that selects the joblib backend used by `parallel_map`."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
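# Usage sketch (assumes a Spark-backed joblib setup via the optional `joblibspark`
# dependency; "spark" is the only backend given special registration above, and
# `ds`/`process_example` are hypothetical):
#
#   from datasets.parallel import parallel_backend
#   with parallel_backend("spark"):
#       ds = ds.map(process_example, num_proc=4)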
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : list[int] ,_lowerCamelCase : int ) -> list[int]:
_lowerCAmelCase : Tuple = [0] * no_of_processes
_lowerCAmelCase : Optional[int] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = burst_time[i]
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Dict = 999999999
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : int = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_lowerCamelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
_lowerCAmelCase : Optional[int] = remaining_time[j]
_lowerCAmelCase : Tuple = j
_lowerCAmelCase : Optional[Any] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
_lowerCAmelCase : Optional[int] = remaining_time[short]
if minm == 0:
_lowerCAmelCase : str = 999999999
if remaining_time[short] == 0:
complete += 1
_lowerCAmelCase : Union[str, Any] = False
# Find finish time of current process
_lowerCAmelCase : str = increment_time + 1
# Calculate waiting time
_lowerCAmelCase : Union[str, Any] = finish_time - arrival_time[short]
_lowerCAmelCase : str = finar - burst_time[short]
if waiting_time[short] < 0:
_lowerCAmelCase : Optional[Any] = 0
# Increment time
increment_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ,_lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Dict = [0] * no_of_processes
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = burst_time[i] + waiting_time[i]
return turn_around_time
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : list[int] ,_lowerCamelCase : int ) -> None:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = 0
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Any = total_waiting_time + waiting_time[i]
_lowerCAmelCase : Any = total_turn_around_time + turn_around_time[i]
print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}" )
print("""Average turn around time =""" ,total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
_a : str = int(input())
_a : Optional[int] = [0] * no_of_processes
_a : List[Any] = [0] * no_of_processes
_a : Dict = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
_a : Dict = map(int, input().split())
_a : List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_a : int = burst_time
_a : List[str] = no_of_processes
_a : int = waiting_time
_a : Any = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_a : Optional[Any] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
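# Worked example (traced by hand against the function above): three processes with
# arrival times [0, 1, 2] and burst times [3, 1, 2]. SRTF preempts P1 at t=1 to run
# the shorter P2, so the per-process waiting times come out as [1, 0, 2].
#
#   calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)   # -> [1, 0, 2]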
"""Tests for DeformableDetrImageProcessor."""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to
        DeformableDetrImageProcessor, assuming do_resize is True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
"""Print all permutations of a sequence via depth-first search over a state space tree."""
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
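# For a quick sanity check: the ["A", "B", "C"] call above prints all 3! = 6
# permutations in the DFS order of the loop (ascending index first), i.e.
#   ['A', 'B', 'C'], ['A', 'C', 'B'], ['B', 'A', 'C'],
#   ['B', 'C', 'A'], ['C', 'A', 'B'], ['C', 'B', 'A']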
"""Fast tokenization classes for RemBERT, backed by the `tokenizers` library."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
    'tokenizer_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}

SPIECE_UNDERLINE = '▁'


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    # A line still belongs to the current block if it keeps the indent, is blank,
    # or is a multi-line signature closing parenthesis.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers package."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
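# For reference, an illustrative comment this script matches (the module path here is
# a made-up example, not necessarily a real diffusers object):
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# The trailing "with A->B" clause drives the `replace_pattern` substitution above, and
# "all-casing" extends it to lower- and upper-cased variants of the two names.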
"""Deformable DETR model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = 'deformable_detr'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, overriding the default `PretrainedConfig.to_dict`."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
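# Instantiation sketch (uses only defaults, so no weights are needed; note that
# `two_stage=True` requires `with_box_refine=True` per the check above):
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   print(config.hidden_size)   # 256, aliased to `d_model` via `attribute_map`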
"""simple docstring"""
from PIL import Image
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
lowercase__ , lowercase__ : Dict = image.size
lowercase__ : str = 0
lowercase__ : Optional[int] = image.load()
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
lowercase__ : Optional[int] = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(_lowerCAmelCase ):
for i in range(_lowerCAmelCase ):
lowercase__ : Optional[int] = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
_UpperCamelCase : str = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
"""Break a Caesar cipher by picking the shift whose decryption minimizes a chi-squared statistic."""
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            'a': 0.08497,
            'b': 0.01492,
            'c': 0.02202,
            'd': 0.04253,
            'e': 0.11162,
            'f': 0.02228,
            'g': 0.02015,
            'h': 0.06094,
            'i': 0.07546,
            'j': 0.00153,
            'k': 0.01292,
            'l': 0.04025,
            'm': 0.02406,
            'n': 0.06749,
            'o': 0.07507,
            'p': 0.01929,
            'q': 0.00095,
            'r': 0.07587,
            's': 0.06327,
            't': 0.09356,
            'u': 0.02758,
            'v': 0.00978,
            'w': 0.02560,
            'x': 0.00150,
            'y': 0.01994,
            'z': 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
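# Usage sketch (the returned tuple is (best_shift, chi_squared_value, decoded_text);
# the exact chi-squared value depends on the frequency table):
#
#   shift, chi2, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")
#   print(shift, decoded)   # expected to recover "short string" with shift 10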
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> tuple:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import math
def a__ ( lowercase : list, lowercase : int = 0, lowercase : int = 0 ) -> list:
"""simple docstring"""
_UpperCamelCase = end or len(lowercase )
for i in range(lowercase, lowercase ):
_UpperCamelCase = i
_UpperCamelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCamelCase = array[temp_index - 1]
temp_index -= 1
_UpperCamelCase = temp_index_value
return array
def a__ ( lowercase : list, lowercase : int, lowercase : int ) -> None: # Max Heap
"""simple docstring"""
_UpperCamelCase = index
_UpperCamelCase = 2 * index + 1 # Left Node
_UpperCamelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCamelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCamelCase = right_index
if largest != index:
_UpperCamelCase , _UpperCamelCase = array[largest], array[index]
heapify(lowercase, lowercase, lowercase )
def a__ ( lowercase : list ) -> list:
"""simple docstring"""
_UpperCamelCase = len(lowercase )
for i in range(n // 2, -1, -1 ):
heapify(lowercase, lowercase, lowercase )
for i in range(n - 1, 0, -1 ):
_UpperCamelCase , _UpperCamelCase = array[0], array[i]
heapify(lowercase, 0, lowercase )
return array
def a__ ( lowercase : list, lowercase : int, lowercase : int, lowercase : int ) -> int:
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def a__ ( lowercase : list, lowercase : int, lowercase : int, lowercase : int ) -> int:
"""simple docstring"""
_UpperCamelCase = low
_UpperCamelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCamelCase , _UpperCamelCase = array[j], array[i]
i += 1
def a__ ( lowercase : list ) -> list:
"""simple docstring"""
if len(lowercase ) == 0:
return array
_UpperCamelCase = 2 * math.ceil(math.loga(len(lowercase ) ) )
_UpperCamelCase = 16
return intro_sort(lowercase, 0, len(lowercase ), lowercase, lowercase )
def a__ ( lowercase : list, lowercase : int, lowercase : int, lowercase : int, lowercase : int ) -> list:
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowercase )
max_depth -= 1
_UpperCamelCase = median_of_a(lowercase, lowercase, start + ((end - start) // 2) + 1, end - 1 )
_UpperCamelCase = partition(lowercase, lowercase, lowercase, lowercase )
intro_sort(lowercase, lowercase, lowercase, lowercase, lowercase )
_UpperCamelCase = p
return insertion_sort(lowercase, lowercase, lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : Any = input('Enter numbers separated by a comma : ').strip()
lowercase__ : Any = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
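    # --- Added note (not in the original) ---
    # Dispatch policy of the introsort above: quicksort recursion is budgeted
    # at 2 * ceil(log2(n)) levels, after which the slice falls back to heap
    # sort; slices shorter than the size threshold (16) are finished with
    # insertion sort. For example, a 1000-element input gets a depth budget of
    # 2 * ceil(log2(1000)) = 20 levels.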
| 324 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_a = 16
_a = 32
def __A ( __lowerCAmelCase , __lowerCAmelCase = 16 )-> int:
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
_UpperCAmelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
__lowerCAmelCase , padding='longest' , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets['train'] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_a = mocked_dataloaders # noqa: F811
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Tuple:
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , __lowerCAmelCase ) == "1":
_UpperCAmelCase = 2
# New Code #
_UpperCAmelCase = int(args.gradient_accumulation_steps )
_UpperCAmelCase = int(args.local_sgd_steps )
# Initialize accelerator
_UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowerCAmelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config['lr']
_UpperCAmelCase = int(config['num_epochs'] )
_UpperCAmelCase = int(config['seed'] )
_UpperCAmelCase = int(config['batch_size'] )
_UpperCAmelCase = evaluate.load('glue' , 'mrpc' )
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() , lr=__lowerCAmelCase )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Now we train the model
for epoch in range(__lowerCAmelCase ):
model.train()
with LocalSGD(
accelerator=__lowerCAmelCase , model=__lowerCAmelCase , local_sgd_steps=__lowerCAmelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # We also do not currently support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__lowerCAmelCase ):
_UpperCAmelCase = model(**__lowerCAmelCase )
_UpperCAmelCase = output.loss
accelerator.backward(__lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**__lowerCAmelCase )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__lowerCAmelCase , references=__lowerCAmelCase , )
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , __lowerCAmelCase )
def __A ( )-> List[str]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
        '--gradient_accumulation_steps' , type=__lowerCAmelCase , default=1 , help='The number of minibatches over which gradients are accumulated before an optimizer step.' , )
parser.add_argument(
'--local_sgd_steps' , type=__lowerCAmelCase , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
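# --- Added usage note (an assumption about a typical invocation; not from the original) ---
# Scripts like this one are normally started through the `accelerate` CLI; the
# two flags below are exactly the ones defined by the argparse block above:
#   accelerate launch local_sgd_example.py --gradient_accumulation_steps 2 --local_sgd_steps 8
# (`local_sgd_example.py` is a hypothetical file name for this module.)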
| 39 |
'''simple docstring'''
import os
import numpy
import onnx
def a__ ( lowercase : List[str], lowercase : str ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = a.name
_UpperCamelCase = b.name
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
_UpperCamelCase = a == b
_UpperCamelCase = name_a
_UpperCamelCase = name_b
return res
def a__ ( lowercase : List[str], lowercase : List[Any], lowercase : Tuple ) -> int:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowercase, lowercase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g, lowercase, lowercase )
_graph_replace_input_with(node_proto.attribute[1].g, lowercase, lowercase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g, lowercase, lowercase )
def a__ ( lowercase : Any, lowercase : Union[str, Any], lowercase : Dict ) -> Tuple:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(lowercase, lowercase, lowercase )
def a__ ( lowercase : Optional[int], lowercase : Union[str, Any], lowercase : Optional[int] ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = list(model.graph.initializer )
_UpperCamelCase = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_UpperCamelCase = inits[i].name
_UpperCamelCase = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph, lowercase, lowercase )
def a__ ( lowercase : Dict ) -> Dict:
"""simple docstring"""
_UpperCamelCase = os.path.dirname(lowercase )
_UpperCamelCase = os.path.basename(lowercase )
_UpperCamelCase = onnx.load(os.path.join(lowercase, lowercase ) )
_UpperCamelCase = list(model.graph.initializer )
_UpperCamelCase = set()
_UpperCamelCase = {}
_UpperCamelCase = []
_UpperCamelCase = 0
for i in range(len(lowercase ) ):
if i in dup_set:
continue
for j in range(i + 1, len(lowercase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i], inits[j] ):
dup_set.add(lowercase )
dup_set.add(lowercase )
_UpperCamelCase = inits[j].data_type
_UpperCamelCase = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''', lowercase )
total_reduced_size += mem_size
_UpperCamelCase = inits[i].name
_UpperCamelCase = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowercase )
else:
_UpperCamelCase = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''', total_reduced_size / 1024 / 1024 / 1024, '''GB''' )
_UpperCamelCase = sorted(lowercase )
_remove_dup_initializers_from_model(lowercase, lowercase, lowercase )
_UpperCamelCase = '''optimized_''' + model_file_name
_UpperCamelCase = os.path.join(lowercase, lowercase )
onnx.save(lowercase, lowercase )
return new_model
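# --- Added usage sketch (hypothetical names and paths; not from the original) ---
# The entry point above loads <dir>/<name>.onnx, drops initializer tensors that
# are byte-identical duplicates, rewires all references to the surviving copy,
# and saves the result as <dir>/optimized_<name>.onnx, returning the new path:
#   new_path = remove_dup_initializers("models/encoder.onnx")
# (`remove_dup_initializers` stands in for the name-mangled function above.)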
| 324 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a :
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCAmelCase_ )} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a :
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'The input training data file (a text file).'} )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={
'help': (
'The input training data files (multiple files in glob format). '
                'Splitting large files into smaller files can often prevent the tokenizer from running out of memory'
)
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
_snake_case : bool = field(default=lowerCAmelCase_ , metadata={'help': 'Whether ot not to use whole word mask.'} )
_snake_case : float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
_snake_case : float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
_snake_case : int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
_snake_case : int = field(
default=-1 , metadata={
'help': (
                'Optional input sequence length after tokenization. '
                'The training dataset will be truncated into blocks of this size for training. '
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = False ,lowercase = None ,):
"""simple docstring"""
def _dataset(lowercase ,lowercase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowercase ,file_path=lowercase ,block_size=args.block_size ,ref_path=lowercase ,)
return LineByLineTextDataset(tokenizer=lowercase ,file_path=lowercase ,block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowercase ,file_path=lowercase ,block_size=args.block_size ,overwrite_cache=args.overwrite_cache ,cache_dir=lowercase ,)
if evaluate:
return _dataset(args.eval_data_file ,args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowercase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file ,args.train_ref_file )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" ,lowercase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name ,cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=lowercase ,cache_dir=model_args.cache_dir ,)
else:
logger.info("""Training new model from scratch""" )
_UpperCAmelCase = AutoModelWithLMHead.from_config(lowercase )
model.resize_token_embeddings(len(lowercase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
_UpperCAmelCase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_UpperCAmelCase = min(data_args.block_size ,tokenizer.max_len )
# Get datasets
_UpperCAmelCase = (
get_dataset(lowercase ,tokenizer=lowercase ,cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_UpperCAmelCase = (
get_dataset(lowercase ,tokenizer=lowercase ,evaluate=lowercase ,cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_UpperCAmelCase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowercase ,plm_probability=data_args.plm_probability ,max_span_length=data_args.max_span_length ,)
else:
if data_args.mlm and data_args.whole_word_mask:
_UpperCAmelCase = DataCollatorForWholeWordMask(
tokenizer=lowercase ,mlm_probability=data_args.mlm_probability )
else:
_UpperCAmelCase = DataCollatorForLanguageModeling(
tokenizer=lowercase ,mlm=data_args.mlm ,mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=lowercase ,args=lowercase ,data_collator=lowercase ,train_dataset=lowercase ,eval_dataset=lowercase ,prediction_loss_only=lowercase ,)
# Training
if training_args.do_train:
_UpperCAmelCase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowercase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output["""eval_loss"""] )
_UpperCAmelCase = {"""perplexity""": perplexity}
_UpperCAmelCase = os.path.join(training_args.output_dir ,"""eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowercase ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" ,lowercase ,str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowercase )
return results
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
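# --- Added usage note (an assumption about a typical invocation; not from the original) ---
# A masked-LM fine-tuning run using the arguments declared in the dataclasses
# above might look like:
#   python run_language_modeling.py --model_name_or_path bert-base-cased \
#       --train_data_file train.txt --do_train --mlm --output_dir ./out
# (`run_language_modeling.py` is a hypothetical file name for this module.)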
| 289 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowercase__ : List[Any] = 25_00_04
lowercase__ : str = 25_00_20
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[Any] = MBartTokenizer
_snake_case : Tuple = MBartTokenizerFast
_snake_case : List[str] = True
_snake_case : Optional[Any] = True
def snake_case__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = 'facebook/mbart-large-en-ro'
_snake_case : Dict = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_snake_case : List[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_snake_case : Union[str, Any] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def snake_case__ ( cls : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_UpperCamelCase = 1
return cls
def snake_case__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def snake_case__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
_UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
_UpperCamelCase = 10
_UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] )
def snake_case__ ( self : int ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def snake_case__ ( self : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def snake_case__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
_UpperCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
_UpperCamelCase = targets['''input_ids''']
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 250004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
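# --- Added usage sketch (mirrors the integration tests above; not original) ---
#   tok = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok(src_text, text_target=tgt_text, return_tensors="pt")
# As the assertions above check, source sequences end with [eos, en_XX] and
# shift_tokens_right moves the target language code (ro_RO) to the front of
# decoder_input_ids.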
| 324 | 0 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
A = logging.getLogger(__name__)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
super().__init__(
lowerCAmelCase__ , question_encoder_tokenizer=lowerCAmelCase__ , generator_tokenizer=lowerCAmelCase__ , index=lowerCAmelCase__ , init_retrieval=lowerCAmelCase__ , )
__a : Any = None
def _lowerCamelCase ( self , _UpperCAmelCase ):
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
__a : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
__a : Any = str(distributed_port + 1 )
__a : Optional[int] = dist.new_group(ranks=lowerCAmelCase__ , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _lowerCamelCase ( self ):
return dist.get_rank(group=self.process_group ) == 0
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=torch.floataa ):
__a : Tuple = torch.empty(lowerCAmelCase__ , dtype=lowerCAmelCase__ )
dist.scatter(lowerCAmelCase__ , src=0 , scatter_list=lowerCAmelCase__ , group=self.process_group )
return target_tensor
def _lowerCamelCase ( self ):
__a : Tuple = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__a : Union[str, Any] = next((addr for addr in addrs if addr.startswith('''e''' )) , lowerCAmelCase__ )
return ifname
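    # --- Added flow note (not in the original) ---
    # In the distributed path of the retrieve-style method below, rank 0
    # gathers the question hidden states from every worker, performs the
    # single index lookup, then scatters the resulting doc ids and doc
    # embeddings back; the other ranks only take part in the gather/scatter
    # collectives on the gloo process group.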
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
if not dist.is_initialized():
__a , __a : Union[str, Any] = self._main_retrieve(lowerCAmelCase__ , lowerCAmelCase__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCAmelCase__ )
# distributed training
__a : Tuple = dist.get_world_size(group=self.process_group )
# gather logic
__a : Dict = None
if self._is_main():
__a : Optional[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(lowerCAmelCase__ )]
dist.gather(torch.tensor(lowerCAmelCase__ ) , dst=0 , gather_list=lowerCAmelCase__ , group=self.process_group )
# scatter logic
__a : Optional[int] = question_hidden_states.shape[0]
__a : Union[str, Any] = []
__a : Any = []
if self._is_main():
assert len(lowerCAmelCase__ ) == world_size
__a , __a : List[Any] = self._main_retrieve(torch.cat(lowerCAmelCase__ ).numpy() , lowerCAmelCase__ )
__a , __a : Any = torch.tensor(lowerCAmelCase__ ), torch.tensor(lowerCAmelCase__ )
__a : Any = self._chunk_tensor(lowerCAmelCase__ , lowerCAmelCase__ )
__a : Dict = self._chunk_tensor(lowerCAmelCase__ , lowerCAmelCase__ )
__a : Dict = self._scattered(lowerCAmelCase__ , [n_queries, n_docs] , target_type=torch.intaa )
__a : int = self._scattered(lowerCAmelCase__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowerCAmelCase__ )
| 160 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowercase__ : str = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = ['pixel_values']
def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 256}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase__ ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(lowerCAmelCase__ ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
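# --- Added usage sketch (hypothetical class name; not from the original) ---
# With the defaults above, preprocessing resizes the shortest edge to 256,
# center-crops to 224x224, rescales by 1/255 and normalizes with the ImageNet
# mean/std:
#   processor = SegmentationImageProcessor()  # stand-in for the mangled name
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])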
| 324 | 0 |
"""simple docstring"""
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Any = abs(_lowercase )
snake_case_ :Tuple = 0
while n > 0:
res += n % 10
n //= 10
return res
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :int = abs(_lowercase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def A_ ( _lowercase ):
'''simple docstring'''
return sum(int(_lowercase ) for c in str(abs(_lowercase ) ) )
def A_ ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowercase, _lowercase ) -> None:
snake_case_ :Any = f"""{func.__name__}({value})"""
snake_case_ :Union[str, Any] = timeit(f"""__main__.{call}""", setup="""import __main__""" )
print(f"""{call:56} = {func(_lowercase )} -- {timing:.4f} seconds""" )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowercase, _lowercase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
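    # --- Added worked example (not in the original) ---
    # For n = 262144 the iterative version peels off the digits 4, 4, 1, 2,
    # 6, 2 and returns 19; the recursive and str-based variants above agree
    # on the same value.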
| 66 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : jnp.ndarray
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_snake_case : int = 3_2
_snake_case : int = 4
_snake_case : int = 4
_snake_case : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_snake_case : Union[bool, Tuple[bool]] = False
_snake_case : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_snake_case : int = 2
_snake_case : Union[int, Tuple[int]] = 8
_snake_case : Optional[Union[int, Tuple[int]]] = None
_snake_case : int = 1_2_8_0
_snake_case : float = 0.0
_snake_case : bool = False
_snake_case : jnp.dtype = jnp.floataa
_snake_case : bool = True
_snake_case : int = 0
_snake_case : bool = False
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : jax.random.KeyArray ) -> FrozenDict:
'''simple docstring'''
_UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
_UpperCamelCase = jnp.zeros(lowerCAmelCase__ , dtype=jnp.floataa )
_UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa )
_UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_UpperCamelCase , _UpperCamelCase = jax.random.split(lowerCAmelCase__ )
_UpperCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["params"]
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.block_out_channels
_UpperCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim
# input
_UpperCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype )
_UpperCamelCase = self.only_cross_attention
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_UpperCamelCase = []
_UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = block_out_channels[i]
_UpperCamelCase = i == len(lowerCAmelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_UpperCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCAmelCase__ )
_UpperCamelCase = down_blocks
# mid
_UpperCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_UpperCamelCase = []
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = reversed_block_out_channels[i]
_UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )]
_UpperCamelCase = i == len(lowerCAmelCase__ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_UpperCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowerCAmelCase__ )
_UpperCamelCase = output_channel
_UpperCamelCase = up_blocks
# out
_UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , jnp.ndarray ):
_UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps.astype(dtype=jnp.floataa )
_UpperCamelCase = jnp.expand_dims(lowerCAmelCase__ , 0 )
_UpperCamelCase = self.time_proj(lowerCAmelCase__ )
_UpperCamelCase = self.time_embedding(lowerCAmelCase__ )
# 2. pre-process
_UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) )
_UpperCamelCase = self.conv_in(lowerCAmelCase__ )
# 3. down
_UpperCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
else:
_UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_UpperCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowerCAmelCase__ , lowerCAmelCase__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_UpperCamelCase = new_down_block_res_samples
# 4. mid
_UpperCamelCase = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
_UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = up_block(
lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , )
else:
_UpperCamelCase = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train )
# 6. post-process
_UpperCamelCase = self.conv_norm_out(lowerCAmelCase__ )
_UpperCamelCase = nn.silu(lowerCAmelCase__ )
_UpperCamelCase = self.conv_out(lowerCAmelCase__ )
_UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
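# Hedged illustration (plain-Python stand-in, not part of the model above) of the
# skip-connection bookkeeping in the up path: each up block pops the last
# (layers_per_block + 1) stashed residuals, mirroring the slicing
# `down_block_res_samples[-(self.layers_per_block + 1):]` with layers_per_block = 2.
_stash = ("d0", "d1", "d2", "d3", "d4", "d5")
_res_samples, _stash = _stash[-3:], _stash[:-3]
assert _res_samples == ("d3", "d4", "d5") and _stash == ("d0", "d1", "d2")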
| 324 | 0 |
from __future__ import annotations
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[float, list[float]]:
UpperCAmelCase : List[Any] = list(range(len(UpperCAmelCase ) ) )
UpperCAmelCase : Dict = [v / w for v, w in zip(UpperCAmelCase , UpperCAmelCase )]
    index.sort(key=lambda UpperCAmelCase : ratio[UpperCAmelCase] , reverse=UpperCAmelCase )
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : Optional[int] = [0] * len(UpperCAmelCase )
for i in index:
if weight[i] <= capacity:
UpperCAmelCase : str = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase : Optional[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
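# Hedged standalone check of the greedy idea above (self-contained; the numbers
# are illustrative, not from the source): sort by value/weight ratio, take whole
# items while they fit, then a fraction of the next one.
def _fractional_knapsack_demo() -> float:
    values, weights, capacity = [60, 100, 120], [10, 20, 30], 50
    total = 0.0
    for i in sorted(range(len(values)), key=lambda j: values[j] / weights[j], reverse=True):
        take = min(weights[i], capacity)
        total += values[i] * take / weights[i]
        capacity -= take
    return total
assert _fractional_knapsack_demo() == 240.0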
| 336 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowercase__ : List[str] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Dict = logging.getLogger()
def a__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def a__ ( lowercase : Tuple, lowercase : Dict="eval" ) -> int:
"""simple docstring"""
_UpperCamelCase = os.path.join(lowercase, F"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
return json.load(lowercase )
raise ValueError(F"""can't find {path}""" )
lowercase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def snake_case__ ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_flax_glue.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def snake_case__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_clm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def snake_case__ ( self : Tuple ) -> str:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_summarization_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def snake_case__ ( self : Tuple ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_mlm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def snake_case__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_ta_mlm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_flax_ner.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_qa.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
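# Hedged miniature of the argv-patching pattern used by every test above (the
# script name and flags here are placeholders, not from the source):
with patch.object(sys, "argv", ["demo_script.py", "--seed", "42"]):
    assert sys.argv == ["demo_script.py", "--seed", "42"]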
| 324 | 0 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
lowerCAmelCase : str = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
lowerCAmelCase : Tuple = float(factorial(SCREAMING_SNAKE_CASE ) )
coefficient /= factorial(SCREAMING_SNAKE_CASE ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
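    # Hedged cross-check of the value above: C(4, 2) * 0.75**2 * 0.25**2
    # = 6 * 0.5625 * 0.0625 = 0.2109375 (math.comb needs Python >= 3.8).
    from math import comb
    assert abs(comb(4, 2) * 0.75**2 * 0.25**2 - 0.2109375) < 1e-12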
| 108 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Optional[Any] = logging.getLogger()
def a__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def a__ ( lowercase : Dict ) -> int:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = os.path.join(lowercase, '''all_results.json''' )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
_UpperCamelCase = json.load(lowercase )
else:
raise ValueError(F"""can't find {path}""" )
return results
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowercase__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@classmethod
def snake_case__ ( cls : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Tuple ) -> int:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : int ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
        # Because we use --version_2_with_negative, the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : List[str] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''translation_no_trainer''' ) ) )
@slow
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCAmelCase__ )
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
        # The base model scores about 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) )
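# Hedged miniature of the command composition above: the shared
# `self._launch_args` prefix plus each test's argv forms the subprocess command
# passed to run_command (paths below are placeholders, not from the source).
_demo_cmd = ["accelerate", "launch", "--config_file", "cfg.yml"] + ["run_glue_no_trainer.py", "--seed", "42"]
assert _demo_cmd[:2] == ["accelerate", "launch"]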
| 324 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __A :
_UpperCamelCase : int
_UpperCamelCase : TreeNode | None = None
_UpperCamelCase : TreeNode | None = None
_a : int = namedtuple('CoinsDistribResult', 'moves excess')
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : TreeNode | None ) -> int:
if root is None:
return 0
# Validation
def count_nodes(_lowerCamelCase : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_lowerCamelCase : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_lowerCamelCase ) != count_coins(_lowerCamelCase ):
raise ValueError("""The nodes number should be same as the number of coins""" )
# Main calculation
def get_distrib(_lowerCamelCase : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 ,1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = get_distrib(node.left )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = get_distrib(node.right )
_lowerCAmelCase : List[str] = 1 - left_distrib_excess
_lowerCAmelCase : Any = 1 - right_distrib_excess
_lowerCAmelCase : Dict = (
left_distrib_moves
+ right_distrib_moves
+ abs(_lowerCamelCase )
+ abs(_lowerCamelCase )
)
_lowerCAmelCase : str = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_lowerCamelCase ,_lowerCamelCase )
return get_distrib(_lowerCamelCase )[0]
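# Hedged worked example for the solver above: a root holding 3 coins with two
# empty leaf children needs exactly 2 moves -- one coin is pushed down each edge
# so every node ends up with a single coin.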
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def a__ ( lowercase : Iterable[str], lowercase : int ) -> Generator[tuple[str, ...], None, None]:
"""simple docstring"""
_UpperCamelCase = iter(lowercase )
while True:
_UpperCamelCase = tuple(itertools.islice(lowercase, lowercase ) )
if not chunk:
return
yield chunk
def a__ ( lowercase : str ) -> str:
"""simple docstring"""
_UpperCamelCase = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
_UpperCamelCase = ''''''
if len(lowercase ) < 2:
return dirty
for i in range(len(lowercase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(lowercase ) & 1:
clean += "X"
return clean
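# Hedged trace of prepare_input above: "Hello" -> uppercased "HELLO"; the doubled
# "LL" is split by an inserted "X", giving "HELXLO" (even length, so no padding).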
def a__ ( lowercase : str ) -> list[str]:
"""simple docstring"""
_UpperCamelCase = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_UpperCamelCase = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(lowercase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(lowercase )
return table
def a__ ( lowercase : str, lowercase : str ) -> str:
"""simple docstring"""
_UpperCamelCase = generate_table(lowercase )
_UpperCamelCase = prepare_input(lowercase )
_UpperCamelCase = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowercase, 2 ):
_UpperCamelCase , _UpperCamelCase = divmod(table.index(lowercase ), 5 )
_UpperCamelCase , _UpperCamelCase = divmod(table.index(lowercase ), 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def a__ ( lowercase : str, lowercase : str ) -> str:
"""simple docstring"""
_UpperCamelCase = generate_table(lowercase )
_UpperCamelCase = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowercase, 2 ):
_UpperCamelCase , _UpperCamelCase = divmod(table.index(lowercase ), 5 )
_UpperCamelCase , _UpperCamelCase = divmod(table.index(lowercase ), 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
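# Hedged round-trip note for the pair above: decode(encode(msg, key), key) yields
# the *prepared* plaintext (uppercased, doubled letters split with X), assuming
# msg avoids "J", which the 25-letter table omits.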
| 324 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
A__ : Any = logging.getLogger(__name__)
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
A__ : List[Any] = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, """rb""") as fp:
A__ : Union[str, Any] = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
A__ : Tuple = Counter()
for tk_ids in data:
counter.update(tk_ids)
A__ : List[Any] = [0] * args.vocab_size
for k, v in counter.items():
A__ : Tuple = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
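    # Hedged miniature of the counting loop above (self-contained; toy token ids
    # and a vocab of 5, not from the source):
    _c = Counter()
    for _tk_ids in [[0, 1, 1], [1, 4]]:
        _c.update(_tk_ids)
    assert [_c.get(_k, 0) for _k in range(5)] == [1, 3, 0, 0, 1]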
| 185 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Any = {'vocab_file': 'spiece.model'}
lowercase__ : Dict = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
lowercase__ : Optional[Any] = {
'google/bigbird-roberta-base': 40_96,
'google/bigbird-roberta-large': 40_96,
'google/bigbird-base-trivia-itc': 40_96,
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : str = ['input_ids', 'attention_mask']
_snake_case : List[int] = []
def __init__( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Union[str, Any]="<s>" , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Dict="[SEP]" , lowerCAmelCase__ : str="[MASK]" , lowerCAmelCase__ : Optional[Any]="[CLS]" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : int , ) -> None:
'''simple docstring'''
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
@property
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
return self.sp_model.get_piece_size()
def snake_case__ ( self : Any ) -> int:
'''simple docstring'''
_UpperCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : str , lowerCAmelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self : str , lowerCAmelCase__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.sp_model.IdToPiece(lowerCAmelCase__ )
return token
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = []
_UpperCamelCase = ''''''
_UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
_UpperCamelCase = True
_UpperCamelCase = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
_UpperCamelCase = False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ) -> str:
'''simple docstring'''
_UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCAmelCase__ )
_UpperCamelCase = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_UpperCamelCase = []
_UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) )
_UpperCamelCase = []
sub_texts.append(lowerCAmelCase__ )
else:
current_sub_text.append(lowerCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_UpperCamelCase = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(lowerCAmelCase__ ) )
else:
_UpperCamelCase = ''''''.join(lowerCAmelCase__ )
_UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_UpperCamelCase = self.clean_up_tokenization(lowerCAmelCase__ )
return clean_text
else:
return text
def snake_case__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
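# Hedged layout note for the builders above: a single sequence becomes
# [CLS] tokens [SEP] with all-zero token type ids; a pair becomes
# [CLS] a [SEP] b [SEP], with type id 1 covering the second segment and its
# closing [SEP].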
| 324 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class A__ ( unittest.TestCase , snake_case__ ):
"""simple docstring"""
def a_ ( self ):
snake_case = load_tool('''text-to-speech''' )
self.tool.setup()
def a_ ( self ):
torch.manual_seed(0 )
snake_case = self.tool('''hey''' )
snake_case = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def a_ ( self ):
torch.manual_seed(0 )
        snake_case = self.tool(text='''hey''' )
snake_case = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 127 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : int = 'audio-spectrogram-transformer'
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str]=768 , lowerCAmelCase__ : Optional[Any]=12 , lowerCAmelCase__ : int=12 , lowerCAmelCase__ : int=3072 , lowerCAmelCase__ : List[str]="gelu" , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-1_2 , lowerCAmelCase__ : Any=16 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=10 , lowerCAmelCase__ : int=10 , lowerCAmelCase__ : Dict=1024 , lowerCAmelCase__ : Optional[int]=128 , **lowerCAmelCase__ : List[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = patch_size
_UpperCamelCase = qkv_bias
_UpperCamelCase = frequency_stride
_UpperCamelCase = time_stride
_UpperCamelCase = max_length
_UpperCamelCase = num_mel_bins
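# Hedged sizing note (formula assumed from the usual AST patch layout, not stated
# in this file): with the defaults above -- 128 mel bins, max_length 1024, 16x16
# patches, strides of 10 -- the spectrogram splits into roughly
# ((128 - 16) // 10 + 1) * ((1024 - 16) // 10 + 1) = 12 * 101 = 1212 patches.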
| 324 | 0 |
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Tuple = 2
lowercase__ : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_lowerCAmelCase )
if n > 1:
factors.append(_lowerCAmelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
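# Hedged standalone restatement of the trial-division idea above (self-contained
# and runnable as-is; the input 60 is illustrative):
def _prime_factors_demo(n: int) -> list[int]:
    i, out = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            out.append(i)
    if n > 1:
        out.append(n)
    return out
assert _prime_factors_demo(60) == [2, 2, 3, 5]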
| 77 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
lowercase__ : Dict = 'ResNetConfig'
# Base docstring
lowercase__ : str = 'microsoft/resnet-50'
lowercase__ : Tuple = [1, 20_48, 7, 7]
# Image classification docstring
lowercase__ : Optional[Any] = 'microsoft/resnet-50'
lowercase__ : List[str] = 'tiger cat'
lowercase__ : List[Any] = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 3 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.Convad(
lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , bias=lowerCAmelCase__ )
_UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ )
_UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = self.convolution(lowerCAmelCase__ )
_UpperCamelCase = self.normalization(lowerCAmelCase__ )
_UpperCamelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : ResNetConfig ) -> Tuple:
'''simple docstring'''
super().__init__()
_UpperCamelCase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_UpperCamelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_UpperCamelCase = config.num_channels
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
_UpperCamelCase = self.embedder(lowerCAmelCase__ )
_UpperCamelCase = self.pooler(lowerCAmelCase__ )
return embedding
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ )
_UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ )
def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = self.convolution(lowerCAmelCase__ )
_UpperCamelCase = self.normalization(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> str:
'''simple docstring'''
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = (
ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , activation=lowerCAmelCase__ ) , )
_UpperCamelCase = ACTaFN[activation]
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(lowerCAmelCase__ )
_UpperCamelCase = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
_UpperCamelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" , lowerCAmelCase__ : int = 4 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = out_channels // reduction
_UpperCamelCase = (
ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
_UpperCamelCase = ACTaFN[activation]
def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(lowerCAmelCase__ )
_UpperCamelCase = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
_UpperCamelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : ResNetConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , ) -> int:
'''simple docstring'''
super().__init__()
_UpperCamelCase = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
_UpperCamelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , activation=config.hidden_act ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = input
for layer in self.layers:
_UpperCamelCase = layer(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : ResNetConfig ) -> List[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(lowerCAmelCase__ )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[int] = ResNetConfig
_snake_case : Union[str, Any] = 'resnet'
_snake_case : Optional[int] = 'pixel_values'
_snake_case : int = True
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=False ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = value
lowercase__ : Optional[int] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowercase__ : Any = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
_UpperCamelCase = config
_UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ )
_UpperCamelCase = ResNetEncoder(lowerCAmelCase__ )
_UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(lowerCAmelCase__ )
_UpperCamelCase = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(lowerCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
_UpperCamelCase = config.num_labels
_UpperCamelCase = ResNetModel(lowerCAmelCase__ )
# classification head
_UpperCamelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case__ ( self : int , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[torch.LongTensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.resnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier(lowerCAmelCase__ )
_UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCamelCase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCamelCase = '''single_label_classification'''
else:
_UpperCamelCase = '''multi_label_classification'''
if self.config.problem_type == "regression":
_UpperCamelCase = MSELoss()
if self.num_labels == 1:
_UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
_UpperCamelCase = CrossEntropyLoss()
_UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCamelCase = BCEWithLogitsLoss()
_UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
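# Hedged note on the classification head above: when `config.problem_type` is
# unset it is inferred at forward time -- "regression" for num_labels == 1
# (MSELoss), "single_label_classification" for integer labels with
# num_labels > 1 (CrossEntropyLoss), and "multi_label_classification"
# otherwise (BCEWithLogitsLoss).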
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Any ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
super()._init_backbone(lowerCAmelCase__ )
_UpperCamelCase = [config.embedding_size] + config.hidden_sizes
_UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ )
_UpperCamelCase = ResNetEncoder(lowerCAmelCase__ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BackboneOutput:
'''simple docstring'''
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = self.embedder(lowerCAmelCase__ )
_UpperCamelCase = self.encoder(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCamelCase = outputs.hidden_states
_UpperCamelCase = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_UpperCamelCase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase__ , )
| 324 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''' , type=str , help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ) , )
    # rest from the training program
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the imported script sees the expected arguments, including the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
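# Example invocation (assuming this launcher is saved as xla_spawn.py and TPU cores are available):
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --model_name_or_path bert-base-cased
# Everything after the training script path is forwarded untouched to the script itself.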
| 16 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x ):
    """simple docstring"""
    if isinstance(x, collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    """simple docstring"""
    def get_vision_text_model( self , config , text_config ):
        '''simple docstring'''
        pass
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pass
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        pass
    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ):
        '''simple docstring'''
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = FlaxVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-3 )
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence( self , pt_model , fx_model , inputs_dict ):
        '''simple docstring'''
        pt_model.to(torch_device )
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs ).to_tuple()
        fx_outputs = fx_model(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4e-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname )
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True )
        fx_outputs_loaded = fx_model_loaded(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs_loaded ) , len(pt_outputs ) , '''Output lengths differ between Flax and PyTorch''' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4e-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname )
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True )
        pt_model_loaded.to(torch_device )
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs_loaded ) , '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4e-2 )
    def check_equivalence_pt_to_flax( self , vision_config , text_config , inputs_dict ):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def check_equivalence_flax_to_pt( self , vision_config , text_config , inputs_dict ):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def test_model_from_pretrained_configs( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )
    def test_vision_text_dual_encoder_from_pretrained( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )
    def test_save_load( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )
    def test_vision_text_output_attention( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )
@is_pt_flax_cross_test
    def test_pt_flax_equivalence( self ):
        '''simple docstring'''
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('''vision_config''' )
        text_config = config_inputs_dict.pop('''text_config''' )
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict )
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict )
@slow
    def test_real_model_save_load_from_pretrained( self ):
        '''simple docstring'''
        model_1, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_1(**inputs )
        out_1 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname )
            model_2 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_2(**inputs )
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-5 )
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin , unittest.TestCase ):
    """simple docstring"""
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        '''simple docstring'''
        vision_model = FlaxViTModel(vision_config )
        text_model = FlaxBertModel(text_config )
return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        vit_model_tester = FlaxViTModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin , unittest.TestCase ):
    """simple docstring"""
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        '''simple docstring'''
        vision_model = FlaxCLIPVisionModel(vision_config )
        text_model = FlaxBertModel(text_config )
return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        clip_model_tester = FlaxCLIPVisionModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase ):
    """simple docstring"""
@slow
    def test_inference( self ):
        '''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        inputs = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''] , images=image , padding=True , return_tensors='''np''' )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 ) )
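# Standalone usage sketch mirroring the integration test above; the checkpoint is the
# one exercised by the test, and `image` stands in for any PIL image:
#   model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian')
#   processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
#   inputs = processor(text=['una foto di un gatto'], images=image, padding=True, return_tensors='np')
#   logits = model(**inputs).logits_per_image  # a higher logit means a better image-text match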
| 324 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase):
    """simple docstring"""
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self ):
"""simple docstring"""
super().setUp()
        tokenizer = ReformerTokenizer(_a , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
"""simple docstring"""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 1000 )
    def test_vocab_size( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_padding( self , max_length=15 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
    def test_full_tokenizer( self ):
"""simple docstring"""
        tokenizer = ReformerTokenizer(_a , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer( self ):
"""simple docstring"""
return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )
@slow
    def test_tokenization_base_easy_symbols( self ):
"""simple docstring"""
        symbols = 'Hello World!'
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
"""simple docstring"""
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ' '.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='pt' )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt' )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence['input_ids'].shape
        model = ReformerModel(config )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration( self ):
"""simple docstring"""
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=False , sequences=sequences , )
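# Quick usage sketch for the slow tests above (the checkpoint name is the one the
# tests load; the encoding for 'Hello World!' is the value asserted above):
#   tok = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
#   tok.encode('Hello World!')  # -> [126, 32, 262, 152, 38, 72, 287]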
| 39 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp( self ):
'''simple docstring'''
        self.model_tester = FlaxAlbertModelTester(self )
@slow
    def test_model_from_pretrained( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 324 | 0 |
"""simple docstring"""
ROMAN = [
    (1000, 'M'),
    (900, 'CM'),
    (500, 'D'),
    (400, 'CD'),
    (100, 'C'),
    (90, 'XC'),
    (50, 'L'),
    (40, 'XL'),
    (10, 'X'),
    (9, 'IX'),
    (5, 'V'),
    (4, 'IV'),
    (1, 'I'),
]
def roman_to_int(roman ):
    """simple docstring"""
    vals = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number ):
    """simple docstring"""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
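# Example round-trips (purely illustrative; values follow the conversion tables above):
#   roman_to_int("MMXXIV")  # -> 2024
#   int_to_roman(1994)      # -> "MCMXCIV"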
| 289 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_call_pil( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
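# Usage sketch for the image processor under test (the checkpoint name is
# illustrative; `pil_image` stands in for any PIL image):
#   from transformers import LevitImageProcessor
#   image_processor = LevitImageProcessor.from_pretrained('facebook/levit-128S')
#   pixel_values = image_processor(images=pil_image, return_tensors='pt').pixel_values
#   # -> shape (1, 3, crop_size['height'], crop_size['width'])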
| 324 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
A = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'vision-encoder-decoder'
    is_composition = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuration of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ):
        logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
    @property
    def outputs( self ):
        return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        common_inputs = OrderedDict()
        common_inputs['''input_ids'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''encoder_hidden_states'''] = {0: '''batch''', 1: '''encoder_sequence'''}
        return common_inputs
    def generate_dummy_inputs( self , tokenizer: 'PreTrainedTokenizerBase' , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional['TensorType'] = None , ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch, encoder_sequence = dummy_input['''input_ids'''].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['''input_ids'''] = dummy_input.pop('''input_ids''' )
        common_inputs['''attention_mask'''] = dummy_input.pop('''attention_mask''' )
        common_inputs['''encoder_hidden_states'''] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        pass
    def get_encoder_config( self , encoder_config ):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self , encoder_config , decoder_config , feature = "default" ):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
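# Usage sketch: composing a VisionEncoderDecoderConfig from two sub-configs.
# The model choices below are hypothetical examples, not requirements:
#   from transformers import AutoConfig
#   enc = AutoConfig.from_pretrained('google/vit-base-patch16-224-in21k')
#   dec = AutoConfig.from_pretrained('gpt2')
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   config.decoder.is_decoder, config.decoder.add_cross_attention  # -> (True, True)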
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowercase__ : Union[str, Any] = HUGGINGFACE_HUB_CACHE
lowercase__ : int = 'config.json'
lowercase__ : Optional[int] = 'diffusion_pytorch_model.bin'
lowercase__ : List[str] = 'diffusion_flax_model.msgpack'
lowercase__ : str = 'model.onnx'
lowercase__ : Optional[int] = 'diffusion_pytorch_model.safetensors'
lowercase__ : List[str] = 'weights.pb'
lowercase__ : str = 'https://huggingface.co'
lowercase__ : str = default_cache_path
lowercase__ : Optional[int] = 'diffusers_modules'
lowercase__ : Optional[int] = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
lowercase__ : Tuple = ['fp16', 'non-ema']
lowercase__ : int = '.self_attn'
| 324 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=50_257 , max_position_embeddings=2_048 , hidden_size=2_048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params(attention_types ):
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
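    # For example (illustrative): expand_attention_types_params([[["global", "local"], 12]])
    # returns ["global", "local", "global", "local", ...] with 24 entries, one per layer.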
def custom_unfold(input, dimension, size, step ):
    '''simple docstring'''
    # Mirrors torch.Tensor.unfold so the operation stays exportable to ONNX.
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step )
    min_length = torch.div(sizedim - size, step, rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks(seq_length, window_size ):
    '''simple docstring'''
    # Finds the largest divisor of seq_length below window_size and the resulting block count.
    import torch
    candidates = torch.arange(1, window_size )
    remainders = torch.remainder(seq_length, candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="""floor""" )
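# Illustrative check of custom_unfold against torch.Tensor.unfold (toy values only):
#   import torch
#   t = torch.arange(10).float()
#   assert torch.equal(custom_unfold(t, 0, 3, 2), t.unfold(0, 3, 2))  # both -> shape (4, 3)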
class GPTNeoOnnxConfig(OnnxConfigWithPast ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_attention_heads( self ) -> int:
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ) -> int:
return 13
| 66 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[int] = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k ):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name )
    if k.startswith('''encoder''' ):
        k = k.replace('''.attn''', '''.self_attn''' )
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''final_layer_norm''' )
    elif k.startswith('''decoder''' ):
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''encoder_attn_layer_norm''' )
        k = k.replace('''norm3''', '''final_layer_norm''' )
    return k
def rename_layernorm_keys(sd ):
    """simple docstring"""
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('''layernorm_embedding''', '''layer_norm''' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    """simple docstring"""
    model = torch.load(checkpoint_path, map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping, strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
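# Example invocation (assuming this converter is saved as convert_blenderbot_checkpoint.py;
# the checkpoint and config paths below are placeholders):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json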
| 324 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCamelCase : int = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
    parser.add_argument(
        '''--dataset_name''' , type=str , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
    parser.add_argument(
        '''--dataset_config''' , type=str , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
    parser.add_argument(
        '''--tokenizer_name_or_path''' , type=str , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
    parser.add_argument(
        '''--shard_size''' , type=int , default=1_000 , help='''Number of entries to go in a single shard.''' , )
    parser.add_argument('''--split''' , type=str , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
    parser.add_argument(
        '''--limit''' , default=None , type=int , help='''Limit the number of shards (used for debugging).''' , )
    parser.add_argument(
        '''--max_length''' , type=int , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
        ''' sequence length that is a multiple of 8.''' , )
    parser.add_argument(
        '''--output_dir''' , default='''tf-tpu''' , type=str , help='''Output directory where the TFRecord shards will be saved. If the'''
        ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
        ''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer ):
    def fn(examples ):
        return tokenizer(examples['''text'''] )
    return fn
def get_serialized_examples(tokenized_data ):
    records = []
    for i in range(len(tokenized_data['''input_ids'''] ) ):
        features = {
            '''input_ids''': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['''input_ids'''][i] ) ),
            '''attention_mask''': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['''attention_mask'''][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main(args ):
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f'''Limiting the dataset to {args.limit} entries.''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
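
    # NOTE (editor): worked example, not in the original. With max_length=512 and a
    # batch whose texts concatenate to 1300 tokens, total_length is rounded down to
    # (1300 // 512) * 512 = 1024, yielding two 512-token samples; the last 276
    # tokens are dropped.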
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 336 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
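
# NOTE (editor): hedged sketch of the idea behind `_LazyModule`, not part of the
# original module. A module-level `__getattr__` (PEP 562) achieves the same deferral:
# the heavy torch-backed submodule is only imported when one of its names is first
# accessed. Kept commented out so it does not alter this module's behavior.
#
#     import importlib
#
#     def __getattr__(name):
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module(f".{submodule}", __name__), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")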
| 324 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
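
# NOTE (editor): hedged aside, not in the original script. The manual truncation above
# exists because distributed samplers pad the last eval batch so every process sees the
# same number of batches; `accelerator.gather_for_metrics` drops those duplicated
# samples automatically, so the whole "New Code" block could collapse to:
#
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)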
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 108 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
        logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
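
# NOTE (editor): worked example of the contiguous split above, not in the original.
# For len(iterable) == 10 and num_proc == 3: div == 3, mod == 1, so the slices are
# iterable[0:4], iterable[4:7], iterable[7:10] -- the one leftover item goes to the
# first worker.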
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 324 | 0 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save the trained model parameters with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp_num1 = model_dic.get("num_bp1")
        bp_num2 = model_dic.get("num_bp2")
        bp_num3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
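
    # NOTE (editor): worked example, not in the original. Average-pooling an 8x8
    # feature map with size_pooling = 2 averages each non-overlapping 2x2 patch and
    # reshapes the 16 results into a 4x4 matrix; "max_pooling" takes each patch's
    # maximum instead.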
    def _expand(self, data):
        # expand a list of 2-D matrices into one 1-D array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded
    def _expand_mat(self, data_mat):
        # expand a single matrix into a 1-D row
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # propagate the pooled gradient back to the convolution layer
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)

                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the image data after the convolution/pooling stage so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
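    # NOTE (editor): hedged usage sketch, not part of the original file. The shapes
    # are assumptions: two 3x3 kernels with stride 1 over a 10x10 input give 8x8
    # feature maps, 2x2 pooling gives 4x4, so the flatten layer needs 2 * 4 * 4 = 32
    # units.
    cnn = CNN([3, 2, 1], 2, 32, 9, 2)
    data_conved, data_pooled = cnn.convolution(np.random.rand(10, 10))
    print(np.shape(data_pooled[0]))  # (4, 4)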
| 44 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
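
# NOTE (editor): worked example of the resize rule above, not in the original test.
# With size = {"shortest_edge": 18} and a 30x40 (w x h) image, w < h, so
# expected_width = 18 and expected_height = int(18 * 40 / 30) = 24; the batched
# branch then takes the per-axis maxima across the batch.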
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 324 | 0 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
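
# NOTE (editor): worked illustration, not in the original script. With
# do_filter_by_val_performance enabled, eval_result = 0.9 and 1000 predictions, the
# rows are sorted by "probability" (descending) and the int(0.9 * 1000) = 900 most
# confident ones are kept as pseudo-labels for the next iteration.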
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_data_dir, "stage-1", "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
| 185 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def snake_case__ ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCAmelCase__ ) )
return
_UpperCamelCase = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
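# Self-contained sketch of the framing the two helpers above produce for a
# sequence pair. All ids here are made up (100-104 stand in for real piece ids,
# 312/313 for CLS/SEP); only the [CLS] A [SEP] B [SEP] layout mirrors the code.
if __name__ == "__main__":
    cls_id, sep_id = 312, 313  # assumed ids, not the real RemBERT vocabulary
    token_ids_a = [100, 101]
    token_ids_b = [102, 103, 104]
    input_ids = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]
    token_type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
    assert len(input_ids) == len(token_type_ids)
    print(input_ids)       # [312, 100, 101, 313, 102, 103, 104, 313]
    print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1, 1]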
| 324 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ['ConditionalDetrFeatureExtractor']
    _import_structure["image_processing_conditional_detr"] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
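# Minimal standalone sketch of the lazy-import idea used above: PEP 562 lets a
# module define __getattr__, so the real import is deferred until an attribute
# is first touched. Simplified illustration only, not the actual `_LazyModule`
# implementation; the demo table maps the stdlib `json` module as an assumption.
import importlib
_demo_import_structure = {"json": ["dumps", "loads"]}
def __getattr__(name):
    # Called only when normal module lookup fails; resolve lazily.
    for _mod, _attrs in _demo_import_structure.items():
        if name in _attrs:
            return getattr(importlib.import_module(_mod), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")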
| 127 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Tuple = 'deformable_detr'
_snake_case : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[Any] , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : List[str]=300 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : Tuple=6 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : List[Any]=6 , lowerCAmelCase__ : Tuple=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any="relu" , lowerCAmelCase__ : int=256 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str="sine" , lowerCAmelCase__ : List[Any]="resnet50" , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[int]=300 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : int=0.25 , lowerCAmelCase__ : Any=False , **lowerCAmelCase__ : Optional[Any] , ) -> str:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
_UpperCamelCase = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def snake_case__ ( self : List[str] ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def snake_case__ ( self : int ) -> int:
'''simple docstring'''
return self.d_model
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
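# Standalone sketch (not the real PretrainedConfig) of how `attribute_map`
# above aliases common names onto model-specific ones, e.g. `hidden_size`
# resolving to `d_model`. The class and values below are illustrative only.
class _AliasedConfig:
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}
    def __init__(self, d_model=256, encoder_attention_heads=8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
    def __getattr__(self, name):
        # Only called when normal lookup fails; redirect via the alias table.
        alias = type(self).attribute_map.get(name)
        if alias is not None:
            return getattr(self, alias)
        raise AttributeError(name)
if __name__ == "__main__":
    cfg = _AliasedConfig()
    assert cfg.hidden_size == cfg.d_model == 256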
| 324 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
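    # Quick sanity check (illustrative): every prime above 3 is congruent to
    # 1 or 5 modulo 6, which is why the trial division above steps by six.
    assert all(n % 6 in (1, 5) for n in range(5, 100) if is_prime(n))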
| 77 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """simple docstring"""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            'a': 0.08497,
            'b': 0.01492,
            'c': 0.02202,
            'd': 0.04253,
            'e': 0.11162,
            'f': 0.02228,
            'g': 0.02015,
            'h': 0.06094,
            'i': 0.07546,
            'j': 0.00153,
            'k': 0.01292,
            'l': 0.04025,
            'm': 0.02406,
            'n': 0.06749,
            'o': 0.07507,
            'p': 0.01929,
            'q': 0.00095,
            'r': 0.07587,
            's': 0.06327,
            't': 0.09356,
            'u': 0.02758,
            'v': 0.00978,
            'w': 0.02560,
            'x': 0.00150,
            'y': 0.01994,
            'z': 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
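# Worked example of the per-letter statistic computed above, on made-up counts:
# for one letter observed 3 times where the frequency table predicts 2.5, the
# chi-squared contribution is (3 - 2.5)**2 / 2.5 = 0.1.
if __name__ == "__main__":
    occurrences, expected = 3, 2.5
    chi_letter_value = ((occurrences - expected) ** 2) / expected
    print(chi_letter_value)  # 0.1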
| 324 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __A ( A_ ,A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = 'swin'
lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : int ,_snake_case : str=224 ,_snake_case : Tuple=4 ,_snake_case : List[Any]=3 ,_snake_case : Union[str, Any]=96 ,_snake_case : Any=[2, 2, 6, 2] ,_snake_case : Tuple=[3, 6, 12, 24] ,_snake_case : Tuple=7 ,_snake_case : List[str]=4.0 ,_snake_case : Tuple=True ,_snake_case : Optional[Any]=0.0 ,_snake_case : int=0.0 ,_snake_case : Union[str, Any]=0.1 ,_snake_case : Optional[Any]="gelu" ,_snake_case : Dict=False ,_snake_case : Optional[Any]=0.02 ,_snake_case : Optional[Any]=1e-5 ,_snake_case : int=32 ,_snake_case : Tuple=None ,_snake_case : Optional[Any]=None ,**_snake_case : int ,) -> Tuple:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
lowercase__ : Tuple = image_size
lowercase__ : List[str] = patch_size
lowercase__ : List[Any] = num_channels
lowercase__ : Optional[Any] = embed_dim
lowercase__ : Union[str, Any] = depths
lowercase__ : List[Any] = len(lowerCAmelCase__ )
lowercase__ : int = num_heads
lowercase__ : Any = window_size
lowercase__ : List[Any] = mlp_ratio
lowercase__ : List[str] = qkv_bias
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : List[str] = layer_norm_eps
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[int] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ : Optional[int] = int(embed_dim * 2 ** (len(lowerCAmelCase__ ) - 1) )
lowercase__ : Tuple = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 ,len(lowerCAmelCase__ ) + 1 )]
lowercase__ , lowercase__ : Dict = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ ,out_indices=lowerCAmelCase__ ,stage_names=self.stage_names )
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = version.parse("1.11" )
@property
def UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCAmelCase ( self : str ) -> float:
"""simple docstring"""
return 1e-4
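# Worked example of the channel-doubling arithmetic in the config above: with
# the defaults embed_dim=96 and depths=[2, 2, 6, 2], the channel dimension
# after the last stage is embed_dim * 2**(len(depths) - 1) = 96 * 8 = 768.
if __name__ == "__main__":
    embed_dim, depths = 96, [2, 2, 6, 2]
    print(int(embed_dim * 2 ** (len(depths) - 1)))  # 768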
| 16 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """simple docstring"""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    """simple docstring"""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """simple docstring"""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
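    # Deterministic check (illustrative): the hybrid agrees with built-in sorted().
    assert sort([4.0, 2.0, 6.0, 8.0, 1.0, 7.0]) == sorted([4.0, 2.0, 6.0, 8.0, 1.0, 7.0])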
| 324 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
_a = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
_a = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
_a = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
_a = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
_a = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
_a = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def __A ( )-> Dict:
"""simple docstring"""
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __A ( __lowerCAmelCase = 100 )-> Optional[Any]:
"""simple docstring"""
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Any:
"""simple docstring"""
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Tuple:
"""simple docstring"""
_UpperCAmelCase = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> int:
"""simple docstring"""
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Tuple:
"""simple docstring"""
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[Any]:
"""simple docstring"""
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Tuple:
"""simple docstring"""
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __A ( )-> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
_UpperCAmelCase = poker_hands.copy()
shuffle(__lowerCAmelCase )
_UpperCAmelCase = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __A ( )-> Dict:
"""simple docstring"""
_UpperCAmelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = PokerHand('2C 4S AS 3D 5C' )
_UpperCAmelCase = True
_UpperCAmelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __A ( )-> List[str]:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
_UpperCAmelCase = line[:14].strip()
_UpperCAmelCase = line[15:].strip()
_UpperCAmelCase , _UpperCAmelCase = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
_UpperCAmelCase = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 376
| 39 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
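# Hypothetical usage sketch: the path below is made up and the call assumes an
# ONNX model file exists there. Per the mapping above, data_type 1 (float32)
# and 6 (int32) count 4 bytes per element, 7 (int64) and 11 (double) count 8.
if __name__ == "__main__":
    optimized_path = remove_dup_initializers('model.onnx')  # hypothetical file
    print('deduplicated model written to', optimized_path)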
| 324 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class a ( lowerCAmelCase_ ):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 289 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[Any] = MBartTokenizer
_snake_case : Tuple = MBartTokenizerFast
_snake_case : List[str] = True
_snake_case : Optional[Any] = True
def snake_case__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = 'facebook/mbart-large-en-ro'
_snake_case : Dict = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_snake_case : List[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_snake_case : Union[str, Any] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def snake_case__ ( cls : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_UpperCamelCase = 1
return cls
def snake_case__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def snake_case__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
_UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
_UpperCamelCase = 10
_UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] )
def snake_case__ ( self : int ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def snake_case__ ( self : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def snake_case__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
_UpperCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
_UpperCamelCase = targets['''input_ids''']
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 250004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
| 324 | 0 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 160 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowercase__ : str = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = ['pixel_values']
def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 256}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase__ ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(lowerCAmelCase__ ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
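# Standalone sketch of the shortest-edge resize rule used above. This is an
# assumption mirroring what `get_resize_output_image_size` does for a
# `shortest_edge` size, not the imported helper itself: scale so the shorter
# side hits `shortest_edge` while keeping the aspect ratio.
def _shortest_edge_size(height: int, width: int, shortest_edge: int = 256) -> Tuple[int, int]:
    scale = shortest_edge / min(height, width)
    return int(round(height * scale)), int(round(width * scale))
if __name__ == "__main__":
    print(_shortest_edge_size(480, 640))  # (256, 341)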
| 324 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : Any = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : int = ['input_ids', 'token_type_ids']
_A : Any = FNetTokenizer
def __init__( self: Tuple , snake_case: Optional[Any]=None , snake_case: int=None , snake_case: int=False , snake_case: List[Any]=True , snake_case: Any=True , snake_case: List[str]="<unk>" , snake_case: Optional[int]="[SEP]" , snake_case: int="<pad>" , snake_case: List[Any]="[CLS]" , snake_case: Union[str, Any]="[MASK]" , **snake_case: Optional[int] , ) -> Union[str, Any]:
snake_case_ :Any = (
AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ , normalized=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
else mask_token
)
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
snake_case_ :Optional[int] = do_lower_case
snake_case_ :List[str] = remove_space
snake_case_ :Optional[int] = keep_accents
snake_case_ :str = vocab_file
snake_case_ :int = False if not self.vocab_file else True
def lowerCAmelCase_ ( self: List[Any] , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]:
snake_case_ :Any = [self.sep_token_id]
snake_case_ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self: Tuple , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]:
snake_case_ :Union[str, Any] = [self.sep_token_id]
snake_case_ :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self: Dict , snake_case: str , snake_case: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ :int = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
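# Self-contained sketch of the copy performed by save_vocabulary above, using
# a temp dir and a dummy file; 'spiece.model' matches VOCAB_FILES_NAMES here,
# and the 'demo' prefix is made up for illustration.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as save_directory:
        src = os.path.join(save_directory, 'dummy.model')
        open(src, 'wb').close()  # stand-in for a real SentencePiece model
        prefix = 'demo'
        out = os.path.join(save_directory, prefix + '-' + 'spiece.model')
        copyfile(src, out)
        print(os.path.basename(out))  # demo-spiece.model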
| 66 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : jnp.ndarray
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_snake_case : int = 3_2
_snake_case : int = 4
_snake_case : int = 4
_snake_case : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_snake_case : Union[bool, Tuple[bool]] = False
_snake_case : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_snake_case : int = 2
_snake_case : Union[int, Tuple[int]] = 8
_snake_case : Optional[Union[int, Tuple[int]]] = None
_snake_case : int = 1_2_8_0
_snake_case : float = 0.0
_snake_case : bool = False
_snake_case : jnp.dtype = jnp.floataa
_snake_case : bool = True
_snake_case : int = 0
_snake_case : bool = False
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : jax.random.KeyArray ) -> FrozenDict:
'''simple docstring'''
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(lowerCAmelCase__ )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.block_out_channels
_UpperCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim
# input
_UpperCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype )
_UpperCamelCase = self.only_cross_attention
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_UpperCamelCase = []
_UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = block_out_channels[i]
_UpperCamelCase = i == len(lowerCAmelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_UpperCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCAmelCase__ )
_UpperCamelCase = down_blocks
# mid
_UpperCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_UpperCamelCase = []
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = reversed_block_out_channels[i]
_UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )]
_UpperCamelCase = i == len(lowerCAmelCase__ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_UpperCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowerCAmelCase__ )
_UpperCamelCase = output_channel
_UpperCamelCase = up_blocks
# out
_UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , jnp.ndarray ):
_UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps.astype(dtype=jnp.floataa )
_UpperCamelCase = jnp.expand_dims(lowerCAmelCase__ , 0 )
_UpperCamelCase = self.time_proj(lowerCAmelCase__ )
_UpperCamelCase = self.time_embedding(lowerCAmelCase__ )
# 2. pre-process
_UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) )
_UpperCamelCase = self.conv_in(lowerCAmelCase__ )
# 3. down
_UpperCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
else:
_UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_UpperCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowerCAmelCase__ , lowerCAmelCase__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_UpperCamelCase = new_down_block_res_samples
# 4. mid
_UpperCamelCase = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
_UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = up_block(
lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , )
else:
_UpperCamelCase = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train )
# 6. post-process
_UpperCamelCase = self.conv_norm_out(lowerCAmelCase__ )
_UpperCamelCase = nn.silu(lowerCAmelCase__ )
_UpperCamelCase = self.conv_out(lowerCAmelCase__ )
_UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
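# Hedged usage sketch (not part of the original file). The module class above has
# an obfuscated name, so the alias below is an assumption made for illustration:
# import jax
# FlaxUNet2DConditionModel = __lowerCAmelCase  # the nn.Module defined above
# unet = FlaxUNet2DConditionModel(sample_size=32, in_channels=4)
# params = unet.init_weights(jax.random.PRNGKey(0))  # FrozenDict of parameters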
| 324 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[int] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase : Tuple = DisjunctiveConstraint(lowerCAmelCase__ )
self.assertTrue(isinstance(dc.token_ids, lowerCAmelCase__ ) )
with self.assertRaises(lowerCAmelCase__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Dict = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase__ ):
DisjunctiveConstraint(lowerCAmelCase__ ) # fails here
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase : Dict = DisjunctiveConstraint(lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = dc.update(1 )
UpperCAmelCase : str = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = dc.update(2 )
UpperCAmelCase : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = dc.update(3 )
UpperCAmelCase : str = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase : Any = DisjunctiveConstraint(lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
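# Condensed, runnable sketch of the update protocol exercised by the tests above:
# each `update(token_id)` call returns a (stepped, completed, reset) triple.
if is_torch_available():
    _dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for _token in (1, 2, 4):
        _stepped, _completed, _reset = _dc.update(_token)
    assert _dc.completed  # [1, 2, 4] satisfies the second branch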
| 336 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowercase__ : List[str] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Dict = logging.getLogger()
def a__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def a__ ( lowercase : Tuple, lowercase : Dict="eval" ) -> int:
"""simple docstring"""
_UpperCamelCase = os.path.join(lowercase, F"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
return json.load(lowercase )
raise ValueError(F"""can't find {path}""" )
lowercase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def snake_case__ ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_flax_glue.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def snake_case__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_clm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def snake_case__ ( self : Tuple ) -> str:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_summarization_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def snake_case__ ( self : Tuple ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_mlm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def snake_case__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_ta_mlm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_flax_ner.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_qa.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
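# Illustrative sketch of the on-disk contract behind the helper defined above as
# `a__` (originally `get_results`): it reads `<split>_results.json` from the
# output directory. The path and value here are invented for the example.
# with open(os.path.join(tmp_dir, "eval_results.json")) as f:
#     assert json.load(f)["eval_accuracy"] >= 0.75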
| 324 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowercase )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , **snake_case__ ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , "vision" )
self.check_model_type(lowerCAmelCase__ )
def __call__( self , snake_case__ , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
if "text_queries" in kwargs:
lowerCAmelCase : List[str] = kwargs.pop("text_queries" )
if isinstance(lowerCAmelCase__ , (str, Image.Image) ):
lowerCAmelCase : Optional[int] = {"image": image, "candidate_labels": candidate_labels}
else:
lowerCAmelCase : Optional[Any] = image
lowerCAmelCase : Union[str, Any] = super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
return results
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = {}
if "threshold" in kwargs:
lowerCAmelCase : Union[str, Any] = kwargs["threshold"]
if "top_k" in kwargs:
lowerCAmelCase : Dict = kwargs["top_k"]
return {}, {}, postprocess_params
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = load_image(inputs["image"] )
lowerCAmelCase : Union[str, Any] = inputs["candidate_labels"]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase : List[Any] = candidate_labels.split("," )
lowerCAmelCase : Optional[Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(lowerCAmelCase__ ):
lowerCAmelCase : int = self.tokenizer(lowerCAmelCase__ , return_tensors=self.framework )
lowerCAmelCase : Optional[int] = self.image_processor(lowerCAmelCase__ , return_tensors=self.framework )
yield {
"is_last": i == len(lowerCAmelCase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = model_inputs.pop("target_size" )
lowerCAmelCase : str = model_inputs.pop("candidate_label" )
lowerCAmelCase : int = model_inputs.pop("is_last" )
lowerCAmelCase : int = self.model(**lowerCAmelCase__ )
lowerCAmelCase : Tuple = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def lowercase__ ( self , snake_case__ , snake_case__=0.1 , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : List[Any] = []
for model_output in model_outputs:
lowerCAmelCase : Union[str, Any] = model_output["candidate_label"]
lowerCAmelCase : List[Any] = BaseModelOutput(lowerCAmelCase__ )
lowerCAmelCase : Any = self.image_processor.post_process_object_detection(
outputs=lowerCAmelCase__ , threshold=lowerCAmelCase__ , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
lowerCAmelCase : int = outputs["scores"][index].item()
lowerCAmelCase : Optional[Any] = self._get_bounding_box(outputs["boxes"][index][0] )
lowerCAmelCase : List[str] = {"score": score, "label": label, "box": box}
results.append(lowerCAmelCase__ )
lowerCAmelCase : str = sorted(lowerCAmelCase__ , key=lambda snake_case__ : x["score"] , reverse=lowerCAmelCase__ )
if top_k:
lowerCAmelCase : Union[str, Any] = results[:top_k]
return results
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = box.int().tolist()
lowerCAmelCase : Union[str, Any] = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
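# Hedged usage sketch mirroring the upstream pipeline docs (the image URL and
# labels are examples; running this downloads a checkpoint):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection")
# preds = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote"],
# )
# # each entry: {"score": float, "label": str,
# #              "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}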
| 108 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Optional[Any] = logging.getLogger()
def a__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def a__ ( lowercase : Dict ) -> int:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = os.path.join(lowercase, '''all_results.json''' )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
_UpperCamelCase = json.load(lowercase )
else:
raise ValueError(F"""can't find {path}""" )
return results
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowercase__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@classmethod
def snake_case__ ( cls : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Tuple ) -> int:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : int ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : List[str] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''translation_no_trainer''' ) ) )
@slow
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCAmelCase__ )
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) )
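# Shape of the command every test above assembles (values are placeholders):
#   accelerate launch --config_file <tmpdir>/default_config.yml \
#       <examples_dir>/pytorch/<task>/run_<task>_no_trainer.py \
#       --output_dir <tmp_dir> --per_device_train_batch_size=2 ...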
| 324 | 0 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[str] = '▁'
_a : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = BigBirdTokenizer
_UpperCamelCase : Tuple = BigBirdTokenizerFast
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : List[str] = True
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = self.tokenizer_class(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = """<s>"""
_lowerCAmelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """[MASK]""" )
self.assertEqual(len(lowerCAmelCase__ ) , 1004 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_lowerCAmelCase : Dict = self.get_rust_tokenizer()
_lowerCAmelCase : str = tokenizer.encode(lowerCAmelCase__ )
_lowerCAmelCase : str = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = BigBirdTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
_lowerCAmelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [285, 46, 10, 170, 382] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def __A ( self ):
_lowerCAmelCase : Any = """Hello World!"""
_lowerCAmelCase : List[Any] = [65, 18536, 2260, 101, 66]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : str = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : Optional[int] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@require_torch
@slow
def __A ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_lowerCAmelCase : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowerCAmelCase : Tuple = """ """.join(lowerCAmelCase__ )
_lowerCAmelCase : int = self.big_tokenizer.encode_plus(lowerCAmelCase__ , return_tensors="""pt""" , return_token_type_ids=lowerCAmelCase__ )
_lowerCAmelCase : Tuple = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowerCAmelCase__ )
_lowerCAmelCase : Optional[int] = BigBirdConfig(attention_type="""original_full""" )
_lowerCAmelCase : Dict = BigBirdModel(lowerCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCAmelCase__ )
model(**lowerCAmelCase__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
_lowerCAmelCase : Any = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def __A ( self ):
_lowerCAmelCase : Any = {"""input_ids""": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
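# Hedged round-trip sketch of the slow tests above (downloads the pretrained
# checkpoint, so it is left as comments):
# tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
# assert tok.encode("Hello World!") == [65, 18536, 2260, 101, 66]
# print(tok.decode([65, 18536, 2260, 101, 66]))  # "[CLS] Hello World![SEP]"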
| 44 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """simple docstring"""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """simple docstring"""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    """simple docstring"""
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    """simple docstring"""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    """simple docstring"""
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
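# Round-trip demo of the cipher above (key and message are illustrative; the
# expected ciphertext is the classic Wikipedia example for this key):
if __name__ == "__main__":
    key = "playfair example"
    encrypted = encode("Hide the gold in the tree stump", key)
    print(encrypted)  # expected: BMODZBXDNABEKUDMUIXMMOUVIF
    print(decode(encrypted, key))  # HIDETHEGOLDINTHETREXESTUMP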
| 324 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Union[str, Any] = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
A__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
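# Net effect of the _LazyModule indirection above (illustrative): importing the
# package itself stays cheap, and the heavy torch-, TF- or flax-backed class is
# only loaded the first time the corresponding attribute is actually accessed.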
| 185 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Any = {'vocab_file': 'spiece.model'}
lowercase__ : Dict = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
lowercase__ : Optional[Any] = {
'google/bigbird-roberta-base': 40_96,
'google/bigbird-roberta-large': 40_96,
'google/bigbird-base-trivia-itc': 40_96,
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : str = ['input_ids', 'attention_mask']
_snake_case : List[int] = []
def __init__( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Union[str, Any]="<s>" , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Dict="[SEP]" , lowerCAmelCase__ : str="[MASK]" , lowerCAmelCase__ : Optional[Any]="[CLS]" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : int , ) -> None:
'''simple docstring'''
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
@property
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
return self.sp_model.get_piece_size()
def snake_case__ ( self : Any ) -> int:
'''simple docstring'''
_UpperCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : str , lowerCAmelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self : str , lowerCAmelCase__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.sp_model.IdToPiece(lowerCAmelCase__ )
return token
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = []
_UpperCamelCase = ''''''
_UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
_UpperCamelCase = True
_UpperCamelCase = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
_UpperCamelCase = False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ) -> str:
'''simple docstring'''
_UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCAmelCase__ )
_UpperCamelCase = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_UpperCamelCase = []
_UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) )
_UpperCamelCase = []
sub_texts.append(lowerCAmelCase__ )
else:
current_sub_text.append(lowerCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_UpperCamelCase = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(lowerCAmelCase__ ) )
else:
_UpperCamelCase = ''''''.join(lowerCAmelCase__ )
_UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_UpperCamelCase = self.clean_up_tokenization(lowerCAmelCase__ )
return clean_text
else:
return text
def snake_case__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
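# Standalone check of the special-tokens mask layout computed above (ids are
# placeholders): 1 marks a special token, 0 a regular sequence token.
_seq_a, _seq_b = [10, 11], [12]
assert [1] + ([0] * len(_seq_a)) + [1] == [1, 0, 0, 1]
assert [1] + ([0] * len(_seq_a)) + [1] + ([0] * len(_seq_b)) + [1] == [1, 0, 0, 1, 0, 1]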
| 324 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """simple docstring"""
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    """simple docstring"""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    """simple docstring"""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70000):
    """simple docstring"""
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        """simple docstring"""
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 127 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : int = 'audio-spectrogram-transformer'
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str]=768 , lowerCAmelCase__ : Optional[Any]=12 , lowerCAmelCase__ : int=12 , lowerCAmelCase__ : int=3072 , lowerCAmelCase__ : List[str]="gelu" , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-1_2 , lowerCAmelCase__ : Any=16 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=10 , lowerCAmelCase__ : int=10 , lowerCAmelCase__ : Dict=1024 , lowerCAmelCase__ : Optional[int]=128 , **lowerCAmelCase__ : List[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = patch_size
_UpperCamelCase = qkv_bias
_UpperCamelCase = frequency_stride
_UpperCamelCase = time_stride
_UpperCamelCase = max_length
_UpperCamelCase = num_mel_bins
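# A minimal usage sketch (illustrative, not part of this file): the class
# above is exported from transformers as ASTConfig, so a randomly initialised
# Audio Spectrogram Transformer can be built with:
#
#     from transformers import ASTConfig, ASTModel
#
#     config = ASTConfig(hidden_size=768, num_hidden_layers=12, max_length=1024)
#     model = ASTModel(config)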
| 324 | 0 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    '''simple docstring'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digita in range(10):
        digits[(length + remaining_length) // 2 - 1] = digita
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += reversible_numbers(
                remaining_length - 2, (remainder + digita + digitb) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    '''simple docstring'''
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
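# A worked example (illustrative): 36 is reversible because 36 + 63 = 99 has
# only odd digits, and solution(2) counts the 20 reversible numbers below 100.
# For the full Project Euler 145 bound, solution(9) is expected to be 608720.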
| 77 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
lowercase__ : Dict = 'ResNetConfig'
# Base docstring
lowercase__ : str = 'microsoft/resnet-50'
lowercase__ : Tuple = [1, 20_48, 7, 7]
# Image classification docstring
lowercase__ : Optional[Any] = 'microsoft/resnet-50'
lowercase__ : List[str] = 'tiger cat'
lowercase__ : List[Any] = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 3 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.Convad(
lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , bias=lowerCAmelCase__ )
_UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ )
_UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = self.convolution(lowerCAmelCase__ )
_UpperCamelCase = self.normalization(lowerCAmelCase__ )
_UpperCamelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : ResNetConfig ) -> Tuple:
'''simple docstring'''
super().__init__()
_UpperCamelCase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_UpperCamelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_UpperCamelCase = config.num_channels
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
_UpperCamelCase = self.embedder(lowerCAmelCase__ )
_UpperCamelCase = self.pooler(lowerCAmelCase__ )
return embedding
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ )
_UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ )
def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = self.convolution(lowerCAmelCase__ )
_UpperCamelCase = self.normalization(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> str:
'''simple docstring'''
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = (
ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , activation=lowerCAmelCase__ ) , )
_UpperCamelCase = ACTaFN[activation]
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(lowerCAmelCase__ )
_UpperCamelCase = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
_UpperCamelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" , lowerCAmelCase__ : int = 4 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = out_channels // reduction
_UpperCamelCase = (
ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
_UpperCamelCase = ACTaFN[activation]
def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(lowerCAmelCase__ )
_UpperCamelCase = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
_UpperCamelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : ResNetConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , ) -> int:
'''simple docstring'''
super().__init__()
_UpperCamelCase = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
_UpperCamelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , activation=config.hidden_act ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = input
for layer in self.layers:
_UpperCamelCase = layer(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : ResNetConfig ) -> List[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(lowerCAmelCase__ )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[int] = ResNetConfig
_snake_case : Union[str, Any] = 'resnet'
_snake_case : Optional[int] = 'pixel_values'
_snake_case : int = True
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=False ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = value
lowercase__ : Optional[int] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowercase__ : Any = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
_UpperCamelCase = config
_UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ )
_UpperCamelCase = ResNetEncoder(lowerCAmelCase__ )
_UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(lowerCAmelCase__ )
_UpperCamelCase = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(lowerCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
_UpperCamelCase = config.num_labels
_UpperCamelCase = ResNetModel(lowerCAmelCase__ )
# classification head
_UpperCamelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case__ ( self : int , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[torch.LongTensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.resnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier(lowerCAmelCase__ )
_UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCamelCase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCamelCase = '''single_label_classification'''
else:
_UpperCamelCase = '''multi_label_classification'''
if self.config.problem_type == "regression":
_UpperCamelCase = MSELoss()
if self.num_labels == 1:
_UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
_UpperCamelCase = CrossEntropyLoss()
_UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCamelCase = BCEWithLogitsLoss()
_UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Any ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
super()._init_backbone(lowerCAmelCase__ )
_UpperCamelCase = [config.embedding_size] + config.hidden_sizes
_UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ )
_UpperCamelCase = ResNetEncoder(lowerCAmelCase__ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BackboneOutput:
'''simple docstring'''
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = self.embedder(lowerCAmelCase__ )
_UpperCamelCase = self.encoder(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCamelCase = outputs.hidden_states
_UpperCamelCase = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_UpperCamelCase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase__ , )
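# A minimal inference sketch (illustrative; the checkpoint name comes from the
# docstring constants above, the image path is a placeholder):
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tiger cat"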
| 324 | 0 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(__lowerCamelCase )
lowercase__ , lowercase__ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__lowerCamelCase , output_loading_info=__lowerCamelCase )
else:
lowercase__ : Dict = ProphetNetForConditionalGenerationOld.from_pretrained(__lowerCamelCase )
lowercase__ , lowercase__ : Dict = ProphetNetForConditionalGeneration.from_pretrained(
__lowerCamelCase , output_loading_info=__lowerCamelCase )
lowercase__ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
lowercase__ : Optional[Any] = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
lowercase__ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
lowercase__ : List[str] = prophet
lowercase__ : str = prophet_old
else:
lowercase__ : Dict = prophet.prophetnet
lowercase__ : int = prophet_old.model
lowercase__ : Tuple = False
for attribute in attributes:
if attribute in mapping:
lowercase__ : str = mapping[attribute]
if not hasattr(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) > 0:
lowercase__ : Dict = attribute
elif hasattr(__lowerCamelCase , __lowerCamelCase ):
lowercase__ : Dict = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ : List[str] = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
lowercase__ : Optional[int] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ : Tuple = old_model.bias
logger.info(f"""{attribute} is initialized""" )
lowercase__ : List[Any] = True
break
elif attribute in special_keys and hasattr(__lowerCamelCase , '''in_proj_weight''' ):
lowercase__ : Tuple = old_model.in_proj_weight.shape[0] // 3
lowercase__ : Any = getattr(__lowerCamelCase , __lowerCamelCase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ : Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ : str = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ : List[Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
lowercase__ : str = True
break
if attribute.isdigit():
lowercase__ : Optional[int] = model[int(__lowerCamelCase )]
lowercase__ : int = old_model[int(__lowerCamelCase )]
else:
lowercase__ : Union[str, Any] = getattr(__lowerCamelCase , __lowerCamelCase )
if old_attribute == "":
lowercase__ : Tuple = old_model
else:
if not hasattr(__lowerCamelCase , __lowerCamelCase ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
lowercase__ : Dict = getattr(__lowerCamelCase , __lowerCamelCase )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
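# Example invocation (script name and paths are placeholders):
#
#     python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#         --prophetnet_checkpoint_path /path/to/prophetnet_old_checkpoint \
#         --pytorch_dump_folder_path /path/to/output_dir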
| 16 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def a__ ( lowercase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if isinstance(lowercase, collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __lowerCAmelCase :
"""simple docstring"""
def snake_case__ ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ) -> List[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Tuple ) -> int:
'''simple docstring'''
pass
def snake_case__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case__ ( self : int , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float ) -> str:
'''simple docstring'''
_UpperCamelCase = np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
_UpperCamelCase = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
_UpperCamelCase = after_output[0]
_UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1e-3 )
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCamelCase = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
_UpperCamelCase = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase = to_atuple(vision_model.config.image_size )
_UpperCamelCase = to_atuple(vision_model.config.patch_size )
_UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCamelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCamelCase = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
_UpperCamelCase = inputs_dict
_UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_UpperCamelCase = pt_model(**lowerCAmelCase__ ).to_tuple()
_UpperCamelCase = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
_UpperCamelCase = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
_UpperCamelCase = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4e-2 )
def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Any:
'''simple docstring'''
_UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
_UpperCamelCase = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def snake_case__ ( self : int ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase = config_inputs_dict.pop('''vision_config''' )
_UpperCamelCase = config_inputs_dict.pop('''text_config''' )
_UpperCamelCase = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_pretrained_model_and_inputs()
_UpperCamelCase = model_a(**lowerCAmelCase__ )
_UpperCamelCase = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = model_a(**lowerCAmelCase__ )
_UpperCamelCase = after_outputs[0]
_UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1e-5 )
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
_UpperCamelCase = 13
_UpperCamelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_UpperCamelCase = random_attention_mask([batch_size, 4] )
_UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaxViTModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def snake_case__ ( self : str ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = FlaxViTModelTester(self )
_UpperCamelCase = FlaxBertModelTester(self )
_UpperCamelCase = vit_model_tester.prepare_config_and_inputs()
_UpperCamelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase = vision_config_and_inputs
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
_UpperCamelCase = 13
_UpperCamelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_UpperCamelCase = random_attention_mask([batch_size, 4] )
_UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FlaxCLIPVisionModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def snake_case__ ( self : List[str] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = FlaxCLIPVisionModelTester(self )
_UpperCamelCase = FlaxBertModelTester(self )
_UpperCamelCase = clip_model_tester.prepare_config_and_inputs()
_UpperCamelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase = vision_config_and_inputs
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCamelCase = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''np''' )
_UpperCamelCase = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCamelCase = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 ) )
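# A minimal usage sketch mirroring the integration test above (model name and
# prompt come from the test; the image variable and the jax import are
# assumptions, not part of this file):
#
#     import jax
#
#     model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian")
#     processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
#     inputs = processor(text=["una foto di un gatto"], images=image, padding=True, return_tensors="np")
#     probs = jax.nn.softmax(model(**inputs).logits_per_image, axis=-1)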
| 324 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """simple docstring"""

    def __init__(self, value):
        """simple docstring"""
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    """simple docstring"""

    def __init__(self, tree):
        """simple docstring"""
        self.tree = tree

    def depth_first_search(self, node):
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self):
        """simple docstring"""
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
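    # A small worked example: a root of 1 with children 2 and 3 sums to 6.
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    print(sum(BinaryTreeNodeSum(root)))  # 6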
| 39 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Tuple=37 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : int=16 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Any=4 , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_attention_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_choices
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_attention_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def snake_case__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = FlaxAlbertModelTester(self )
@slow
def snake_case__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''albert-base-v2''' )
_UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_UpperCamelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
_UpperCamelCase = (1, 11, 768)
self.assertEqual(output.shape , lowerCAmelCase__ )
_UpperCamelCase = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
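# A minimal usage sketch mirroring the integration test above (the checkpoint
# name comes from the test; the tokenizer step is an assumption, not part of
# this file):
#
#     from transformers import AlbertTokenizerFast, FlaxAlbertModel
#
#     tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     model = FlaxAlbertModel.from_pretrained("albert-base-v2")
#     outputs = model(**tokenizer("Hello, world!", return_tensors="np"))
#     print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)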
| 324 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
webbrowser.open(link)
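# Usage (assuming the script is saved as google_search.py):
#
#     python google_search.py "hello world"
#
# Without arguments the script prompts for a query on stdin, then opens the
# first result found in the default browser.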
| 289 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=7 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : Optional[Any]=18 , lowerCAmelCase__ : Union[str, Any]=30 , lowerCAmelCase__ : Any=400 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 18}
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = LevitImageProcessor if is_vision_available() else None
def snake_case__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = LevitImageProcessingTester(self )
@property
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
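# A minimal usage sketch (the checkpoint name is an assumption; any LeViT
# checkpoint with a preprocessor config works the same way):
#
#     from PIL import Image
#     from transformers import LevitImageProcessor
#
#     processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#     pixel_values = processor(Image.open("cat.png"), return_tensors="pt").pixel_values
#     print(pixel_values.shape)  # e.g. torch.Size([1, 3, 224, 224])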
| 324 | 0 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
A = logging.getLogger()
def __A ( ) -> Union[str, Any]:
__a : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''-f''')
__a : Optional[int] = parser.parse_args()
return args.f
def __A ( a_ :Dict) -> int:
__a : List[Any] = {}
__a : Union[str, Any] = os.path.join(a_ , '''all_results.json''')
if os.path.exists(a_):
with open(a_ , '''r''') as f:
__a : Any = json.load(a_)
else:
raise ValueError(F"""can't find {path}""")
return results
def __A ( ) -> Optional[Any]:
__a : Optional[Any] = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
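# For reference, `run_command(self._launch_args + testargs)` in the tests below
# amounts to a shell invocation of roughly this shape (paths are illustrative):
#
#     accelerate launch --config_file /tmp/<tmpdir>/default_config.yml \
#         <examples_dir>/pytorch/text-classification/run_glue_no_trainer.py \
#         --model_name_or_path distilbert-base-uncased --output_dir <tmp_dir> ...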
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls ):
__a : Tuple = tempfile.mkdtemp()
__a : List[Any] = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__a : Tuple = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def _lowerCamelCase ( cls ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCamelCase ( self ):
__a : int = self.get_auto_remove_tmp_dir()
__a : Tuple = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
__a : Union[str, Any] = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_clm_no_trainer( self ):
__a : List[str] = self.get_auto_remove_tmp_dir()
__a : Union[str, Any] = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__a : Optional[int] = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_mlm_no_trainer( self ):
__a : Dict = self.get_auto_remove_tmp_dir()
__a : Any = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__a : Any = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_ner_no_trainer( self ):
__a : int = 7 if get_gpu_count() > 1 else 2
__a : Optional[int] = self.get_auto_remove_tmp_dir()
__a : Tuple = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__a : Any = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_squad_no_trainer( self ):
__a : Optional[int] = self.get_auto_remove_tmp_dir()
__a : Any = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__a : Tuple = get_results(lowerCAmelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer( self ):
__a : Union[str, Any] = self.get_auto_remove_tmp_dir()
__a : int = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__a : Optional[Any] = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer( self ):
__a : Optional[int] = self.get_auto_remove_tmp_dir()
__a : Dict = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__a : List[str] = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer( self ):
__a : Any = self.get_auto_remove_tmp_dir()
__a : Dict = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__a : Optional[Any] = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''translation_no_trainer''' ) ) )
@slow
    def test_run_semantic_segmentation_no_trainer( self ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
__a : Dict = self.get_auto_remove_tmp_dir()
__a : List[Any] = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
__a : Union[str, Any] = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.1_0 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer( self ):
__a : Tuple = self.get_auto_remove_tmp_dir()
__a : Any = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
__a : int = get_results(lowerCAmelCase__ )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) ) | 160 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 324 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput ( _lowerCAmelCase ):
'''simple docstring'''
    sample : jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel ( nn.Module , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
    sample_size : int = 3_2
    in_channels : int = 4
    out_channels : int = 4
    down_block_types : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention : Union[bool, Tuple[bool]] = False
    block_out_channels : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    layers_per_block : int = 2
    attention_head_dim : Union[int, Tuple[int]] = 8
    num_attention_heads : Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim : int = 1_2_8_0
    dropout : float = 0.0
    use_linear_projection : bool = False
    dtype : jnp.dtype = jnp.floataa
    flip_sin_to_cos : bool = True
    freq_shift : int = 0
    use_memory_efficient_attention : bool = False
    def init_weights( self , rng: jax.random.KeyArray ) -> FrozenDict:
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.floataa )
        timesteps = jnp.ones((1,) , dtype=jnp.intaa )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        params_rng, dropout_rng = jax.random.split(rng )
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
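    # Hedged usage sketch (an assumption, not from this file): materializing the
    # parameter PyTree with a PRNG key, outside of Flax's setup():
    #   rng = jax.random.PRNGKey(0)
    #   params = model.init_weights(rng)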
    def setup( self ):
snake_case_ :List[str] = self.block_out_channels
snake_case_ :Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case_ :Optional[int] = self.num_attention_heads or self.attention_head_dim
# input
snake_case_ :str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case_ :Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case_ :str = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype )
snake_case_ :Optional[Any] = self.only_cross_attention
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ :str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ :Optional[int] = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case_ :str = []
snake_case_ :List[str] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
snake_case_ :Optional[Any] = output_channel
snake_case_ :Optional[Any] = block_out_channels[i]
snake_case_ :Tuple = i == len(lowerCAmelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case_ :Tuple = FlaxCrossAttnDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ :str = FlaxDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCAmelCase__ )
snake_case_ :List[Any] = down_blocks
# mid
snake_case_ :str = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
snake_case_ :Union[str, Any] = []
snake_case_ :Union[str, Any] = list(reversed(lowerCAmelCase__ ) )
snake_case_ :Any = list(reversed(lowerCAmelCase__ ) )
snake_case_ :Tuple = list(reversed(lowerCAmelCase__ ) )
snake_case_ :List[str] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
snake_case_ :str = output_channel
snake_case_ :int = reversed_block_out_channels[i]
snake_case_ :Union[str, Any] = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )]
snake_case_ :List[str] = i == len(lowerCAmelCase__ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
snake_case_ :Any = FlaxCrossAttnUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ :Union[str, Any] = FlaxUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowerCAmelCase__ )
snake_case_ :List[str] = output_channel
snake_case_ :List[str] = up_blocks
# out
snake_case_ :Any = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
snake_case_ :Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict: bool = True , train: bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
if not isinstance(lowerCAmelCase__ , jnp.ndarray ):
snake_case_ :List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case_ :str = timesteps.astype(dtype=jnp.floataa )
snake_case_ :List[str] = jnp.expand_dims(lowerCAmelCase__ , 0 )
snake_case_ :List[Any] = self.time_proj(lowerCAmelCase__ )
snake_case_ :Optional[int] = self.time_embedding(lowerCAmelCase__ )
# 2. pre-process
snake_case_ :str = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) )
snake_case_ :Any = self.conv_in(lowerCAmelCase__ )
# 3. down
snake_case_ :Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_, snake_case_ :List[str] = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
else:
snake_case_, snake_case_ :Tuple = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
snake_case_ :str = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowerCAmelCase__ , lowerCAmelCase__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
snake_case_ :Tuple = new_down_block_res_samples
# 4. mid
snake_case_ :Optional[int] = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
snake_case_ :Optional[int] = down_block_res_samples[-(self.layers_per_block + 1) :]
snake_case_ :int = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ :Dict = up_block(
lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , )
else:
snake_case_ :List[Any] = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train )
# 6. post-process
snake_case_ :List[Any] = self.conv_norm_out(lowerCAmelCase__ )
snake_case_ :str = nn.silu(lowerCAmelCase__ )
snake_case_ :str = self.conv_out(lowerCAmelCase__ )
snake_case_ :Any = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
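    # Hedged usage sketch (an assumption, not from this file): a bound forward pass
    # reusing `params` from init_weights above:
    #   out = model.apply({"params": params}, latents, timestep, encoder_hidden_states)
    #   noise_pred = out.sample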
| 66 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[int] = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key ( k ):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name )
    if k.startswith('''encoder''' ):
        k = k.replace('''.attn''', '''.self_attn''' )
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''final_layer_norm''' )
    elif k.startswith('''decoder''' ):
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''encoder_attn_layer_norm''' )
        k = k.replace('''norm3''', '''final_layer_norm''' )
    return k
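# Worked example of the mapping above: the encoder key
#   'encoder.layers.0.attention.q_lin.weight'
# passes through PATTERNS ('attention' -> 'attn', 'q_lin' -> 'q_proj') and the
# encoder-specific '.attn' -> '.self_attn' rewrite, yielding
#   'encoder.layers.0.self_attn.q_proj.weight'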
def rename_layernorm_keys ( sd ):
    """simple docstring"""
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('''layernorm_embedding''', '''layer_norm''' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint ( checkpoint_path, pytorch_dump_folder_path, config_json_path ):
    """simple docstring"""
    model = torch.load(checkpoint_path, map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping, strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 324 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
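# Grounded examples of the helper above: to_atuple(7) returns (7, 7), while an
# already-iterable input such as (224, 224) is returned unchanged.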
@require_flax
class VisionTextDualEncoderMixin :
    def get_vision_text_model( self , vision_config , text_config ):
pass
    def prepare_config_and_inputs( self ):
pass
    def get_pretrained_model_and_inputs( self ):
pass
    def assert_almost_equals( self , a , b , tol ):
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff, tol, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
UpperCAmelCase : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
UpperCAmelCase : int = model(input_ids=lowerCAmelCase__, pixel_values=lowerCAmelCase__, attention_mask=lowerCAmelCase__ )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.get_vision_text_model(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : Tuple = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = model(input_ids=lowerCAmelCase__, pixel_values=lowerCAmelCase__, attention_mask=lowerCAmelCase__ )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.get_vision_text_model(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : List[str] = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
UpperCAmelCase : str = model(input_ids=lowerCAmelCase__, pixel_values=lowerCAmelCase__, attention_mask=lowerCAmelCase__ )
UpperCAmelCase : str = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase : List[Any] = model(input_ids=lowerCAmelCase__, pixel_values=lowerCAmelCase__, attention_mask=lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = after_output[0]
UpperCAmelCase : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__, 1E-3 )
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.get_vision_text_model(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : str = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = model(
input_ids=lowerCAmelCase__, pixel_values=lowerCAmelCase__, attention_mask=lowerCAmelCase__, output_attentions=lowerCAmelCase__ )
UpperCAmelCase : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase : int = to_atuple(vision_model.config.image_size )
UpperCAmelCase : Union[str, Any] = to_atuple(vision_model.config.patch_size )
UpperCAmelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase : int = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
    def check_pt_flax_equivalence( self , pt_model , fx_model , inputs_dict ):
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
UpperCAmelCase : Dict = inputs_dict
UpperCAmelCase : str = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCAmelCase : Tuple = pt_model(**lowerCAmelCase__ ).to_tuple()
UpperCAmelCase : List[Any] = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ), len(lowerCAmelCase__ ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase : str = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__, from_pt=lowerCAmelCase__ )
UpperCAmelCase : int = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ), len(lowerCAmelCase__ ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase : Optional[int] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__, from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
UpperCAmelCase : Optional[Any] = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ), len(lowerCAmelCase__ ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__, pt_output_loaded.numpy(), 4E-2 )
    def check_equivalence_pt_to_flax( self , vision_config , text_config , inputs_dict ):
UpperCAmelCase : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : Any = VisionTextDualEncoderModel(lowerCAmelCase__ )
UpperCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
UpperCAmelCase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCAmelCase__ )
UpperCAmelCase : Tuple = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ )
    def check_equivalence_flax_to_pt( self , vision_config , text_config , inputs_dict ):
UpperCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : int = VisionTextDualEncoderModel(lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
UpperCAmelCase : int = load_flax_weights_in_pytorch_model(lowerCAmelCase__, fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ )
    def test_model_from_pretrained_configs( self ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
    def test_vision_text_dual_encoder_from_pretrained( self ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
    def test_save_load( self ):
UpperCAmelCase : int = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
    def test_vision_text_output_attention( self ):
UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
    def test_pt_flax_equivalence( self ):
UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase : Dict = config_inputs_dict.pop('''vision_config''' )
UpperCAmelCase : str = config_inputs_dict.pop('''text_config''' )
UpperCAmelCase : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ )
@slow
    def test_real_model_save_load_from_pretrained( self ):
UpperCAmelCase , UpperCAmelCase : Tuple = self.get_pretrained_model_and_inputs()
UpperCAmelCase : int = model_a(**lowerCAmelCase__ )
UpperCAmelCase : List[str] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase : Tuple = model_a(**lowerCAmelCase__ )
UpperCAmelCase : Any = after_outputs[0]
UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__, 1E-5 )
@require_flax
class FlaxViTBertModelTest ( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
UpperCAmelCase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCAmelCase__, text_from_pt=lowerCAmelCase__, )
UpperCAmelCase : Union[str, Any] = 1_3
UpperCAmelCase : int = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase : str = ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
UpperCAmelCase : int = random_attention_mask([batch_size, 4] )
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
UpperCAmelCase : Any = FlaxViTModel(lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
    def prepare_config_and_inputs( self ):
UpperCAmelCase : List[str] = FlaxViTModelTester(self )
UpperCAmelCase : List[Any] = FlaxBertModelTester(self )
UpperCAmelCase : Optional[int] = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase : Optional[int] = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase : Dict = vision_config_and_inputs
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest ( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
UpperCAmelCase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCAmelCase__, text_from_pt=lowerCAmelCase__, )
UpperCAmelCase : Optional[Any] = 1_3
UpperCAmelCase : Dict = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase : Union[str, Any] = ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
UpperCAmelCase : Dict = random_attention_mask([batch_size, 4] )
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
UpperCAmelCase : Tuple = FlaxCLIPVisionModel(lowerCAmelCase__ )
UpperCAmelCase : Dict = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
    def prepare_config_and_inputs( self ):
UpperCAmelCase : Union[str, Any] = FlaxCLIPVisionModelTester(self )
UpperCAmelCase : Optional[Any] = FlaxBertModelTester(self )
UpperCAmelCase : Tuple = clip_model_tester.prepare_config_and_inputs()
UpperCAmelCase : Optional[int] = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase : List[str] = vision_config_and_inputs
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference( self ):
UpperCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 )
UpperCAmelCase : int = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
UpperCAmelCase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase : Any = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors='''np''' )
UpperCAmelCase : List[str] = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
UpperCAmelCase : Tuple = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image, lowerCAmelCase__, atol=1E-3 ) )
| 336 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324 | 0 |
"""simple docstring"""
def solution( n : int = 1_0 ):
    '''simple docstring'''
    if not isinstance(n , int ) or n < 0:
        raise ValueError("Invalid input" )
    modulus = 1_0**n
    number = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , modulus )) + 1
    return str(number % modulus )
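# Why the modulus trick above works (a general identity, not specific to this file):
# (28433 * 2**7830457 + 1) % 10**n == (28433 * pow(2, 7830457, 10**n) + 1) % 10**n,
# so the roughly 2.36-million-digit power is never materialized; pow(base, exp, mod)
# uses fast modular exponentiation instead.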
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }")
| 108 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig :
    """simple docstring"""
    backend_name = None
@experimental
def parallel_map ( function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func ):
    """simple docstring"""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
def _map_with_multiprocessing_pool ( function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func ):
    """simple docstring"""
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        start = div * index + min(index, mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F"""Error dividing inputs iterable among processes. """
            F"""Total number of objects {len(iterable )}, """
            F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
    logger.info(
        F"""Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds )
    logger.info(F"""Finished {num_proc} processes""" )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F"""Unpacked {len(mapped )} objects""" )
    return mapped
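# Worked example of the contiguous split above: with len(iterable) == 10 and
# num_proc == 3, div == 3 and mod == 1, giving slices [0:4], [4:7], [7:10]
# (sizes 4, 3, 3) -- the first `mod` workers each take one extra item.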
def _map_with_joblib ( function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func ):
    """simple docstring"""
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend ( backend_name ):
    """simple docstring"""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
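# Hedged usage sketch (an assumption, not from this file):
#   with parallel_backend("spark"):
#       mapped = parallel_map(fn, items, num_proc, types, disable_tqdm, desc, map_fn)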
| 324 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification ( TaskTemplate ):
    task : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema : ClassVar[Features] = Features({"image": Image()} )
    label_schema : ClassVar[Features] = Features({"labels": ClassLabel} )
    image_column : str = "image"
    label_column : str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(F"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
return task_template
    @property
    def column_mapping( self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
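    # Hedged usage sketch (an assumption, not from this file): aligning the template
    # with a dataset's features so "labels" picks up that dataset's own ClassLabel:
    #   task = ImageClassification(image_column="img", label_column="label")
    #   task = task.align_with_features(dataset.features)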
| 44 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
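    # Worked example of the shortest-edge logic above: with size {'shortest_edge': 18}
    # and a 20 x 40 (w x h) image, w < h gives expected_width == 18 and
    # expected_height == int(18 * 40 / 20) == 36.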
@require_torch
@require_vision
class DeformableDetrImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
_snake_case : Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_rescale''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_pad''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
_UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
    def test_batch_feature( self ):
'''simple docstring'''
pass
    def test_call_pil( self ):
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy( self ):
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch( self ):
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
'''simple docstring'''
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''image_id''': 39769, '''annotations''': target}
# encode them
_UpperCamelCase = DeformableDetrImageProcessor()
_UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
# verify area
_UpperCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) )
# verify class_labels
_UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
'''simple docstring'''
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
_UpperCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_UpperCamelCase = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
# verify area
_UpperCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) )
# verify class_labels
_UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) )
# verify masks
_UpperCamelCase = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowerCAmelCase__ )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) )
| 324 | 0 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative ( img ):
    pixel_h , pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [2_55, 2_55, 2_55] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread("""image_data/lena.jpg""", 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
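# Added example: `imread` returns a NumPy array, so the per-pixel loop above can
# be replaced by a single vectorized broadcast. A minimal sketch (the function
# name is illustrative), assuming an 8-bit image:
import numpy as np
def convert_to_negative_vectorized(image: np.ndarray) -> np.ndarray:
    # 255 - value for every pixel and channel at once
    return 255 - image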
| 185 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 2_56,
}
SPIECE_UNDERLINE = '▁'
class RemBertTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
_snake_case : str = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Dict = RemBertTokenizer
def __init__( self : List[Any] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : List[Any]="[CLS]" , lowerCAmelCase__ : str="[SEP]" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : Optional[int]="[SEP]" , lowerCAmelCase__ : List[str]="<pad>" , lowerCAmelCase__ : str="[CLS]" , lowerCAmelCase__ : List[Any]="[MASK]" , **lowerCAmelCase__ : List[Any] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCamelCase = do_lower_case
_UpperCamelCase = remove_space
_UpperCamelCase = keep_accents
_UpperCamelCase = vocab_file
_UpperCamelCase = False if not self.vocab_file else True
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__ ( self : int , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCAmelCase__ ) )
return
_UpperCamelCase = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
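# Added sketch: the special-tokens layout produced by the methods above, as
# plain list arithmetic (no checkpoint download needed; names are illustrative).
def _special_tokens_mask_shape(n_tokens_a: int, n_tokens_b: int = 0) -> list:
    # [CLS] seq_a [SEP] (+ seq_b [SEP]); 1 marks a special token
    if n_tokens_b == 0:
        return [1] + [0] * n_tokens_a + [1]
    return [1] + [0] * n_tokens_a + [1] + [0] * n_tokens_b + [1]
assert _special_tokens_mask_shape(3) == [1, 0, 0, 0, 1]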
| 324 | 0 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=3 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=True , __snake_case=9_9 , __snake_case=3_2 , __snake_case=5 , __snake_case=4 , __snake_case=3_7 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_1_2 , __snake_case=1_6 , __snake_case=2 , __snake_case=0.02 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
def a_ ( self ):
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowerCAmelCase__ , )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = FalconModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
snake_case = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
snake_case = True
snake_case = FalconModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )
snake_case = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )
snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
snake_case = FalconForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
snake_case = True
snake_case = True
snake_case = FalconForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
snake_case = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , )
snake_case = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )['''hidden_states'''][0]
snake_case = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )['''hidden_states'''][0]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def a_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class FalconModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__magic_name__ = (FalconForCausalLM,) if is_torch_available() else ()
__magic_name__ = (
{
'feature-extraction': FalconModel,
'text-classification': FalconForSequenceClassification,
'text-generation': FalconForCausalLM,
'question-answering': FalconForQuestionAnswering,
'token-classification': FalconForTokenClassification,
'zero-shot': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def a_ ( self ):
snake_case = FalconModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def a_ ( self ):
snake_case , *snake_case = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
snake_case = alibi
self.model_tester.create_and_check_model(lowerCAmelCase__ , *lowerCAmelCase__ )
def a_ ( self ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = input_dict['''input_ids''']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase__ )
snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case = FalconForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a_ ( self ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = '''single_label_classification'''
snake_case = input_dict['''input_ids''']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase__ )
snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case = FalconForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a_ ( self ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = input_dict['''input_ids''']
snake_case = FalconForCausalLM(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case = model(lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
snake_case = input_ids.shape[0]
snake_case = model._convert_to_rw_cache(result.past_key_values )
snake_case = model._convert_cache_to_standard_format(lowerCAmelCase__ , lowerCAmelCase__ )
for layer in range(len(lowerCAmelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def a_ ( self ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = '''multi_label_classification'''
snake_case = input_dict['''input_ids''']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase__ )
snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case = FalconForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a_ ( self ):
for model_class in self.all_generative_model_classes:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowerCAmelCase__ , '''use_cache''' ):
return
snake_case = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
if "use_cache" not in inputs:
snake_case = True
snake_case = model(**lowerCAmelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
snake_case = (
getattr(lowerCAmelCase__ , '''decoder_layers''' , lowerCAmelCase__ )
or getattr(lowerCAmelCase__ , '''num_decoder_layers''' , lowerCAmelCase__ )
or config.num_hidden_layers
)
snake_case = getattr(lowerCAmelCase__ , '''num_kv_heads''' , config.num_attention_heads )
snake_case = getattr(lowerCAmelCase__ , '''d_model''' , config.hidden_size )
snake_case = embed_dim // num_attention_heads
snake_case = outputs['''past_key_values''']
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
snake_case , snake_case = inputs['''input_ids'''].shape
for i in range(lowerCAmelCase__ ):
if config.new_decoder_architecture:
snake_case = config.num_attention_heads
elif config.multi_query:
snake_case = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def a_ ( self ):
snake_case = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
snake_case = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(lowerCAmelCase__ )
snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ )
snake_case = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
snake_case = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=1_9 )
snake_case = tokenizer.batch_decode(lowerCAmelCase__ )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def a_ ( self ):
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
snake_case = FalconForCausalLM.from_pretrained(lowerCAmelCase__ )
model.eval()
model.to(lowerCAmelCase__ )
snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 )
model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 )
model.generate(**lowerCAmelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def a_ ( self ):
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
snake_case = FalconForCausalLM.from_pretrained(lowerCAmelCase__ )
model.eval()
model.to(device=lowerCAmelCase__ )
snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# Test results are the same with and without cache
snake_case = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ )
snake_case = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
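# Added sketch (illustrative, not the library API): the cache round trip tested
# above converts between Falcon's "RW" layout, which fuses batch and heads into
# one leading dimension (B*H, S, D), and the standard (B, H, S, D) layout;
# reshaping between the two is lossless.
import torch
def _to_rw_layout(t):
    b, h, s, d = t.shape
    return t.reshape(b * h, s, d)
def _to_standard_layout(t, batch):
    bh, s, d = t.shape
    return t.reshape(batch, bh // batch, s, d)
_x = torch.randn(2, 4, 5, 8)
assert torch.equal(_to_standard_layout(_to_rw_layout(_x), batch=2), _x)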
| 127 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig( PretrainedConfig ):
"""simple docstring"""
    model_type = 'deformable_detr'
    attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[Any] , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : List[str]=300 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : Tuple=6 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : List[Any]=6 , lowerCAmelCase__ : Tuple=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any="relu" , lowerCAmelCase__ : int=256 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str="sine" , lowerCAmelCase__ : List[Any]="resnet50" , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[int]=300 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : int=0.25 , lowerCAmelCase__ : Any=False , **lowerCAmelCase__ : Optional[Any] , ) -> str:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = backbone_config.get('''model_type''' )
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(lowerCAmelCase__ )
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
_UpperCamelCase = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def snake_case__ ( self : List[str] ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def snake_case__ ( self : int ) -> int:
'''simple docstring'''
return self.d_model
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
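# Added note: thanks to the attribute map declared above, reading
# `config.hidden_size` or `config.num_attention_heads` transparently resolves to
# `d_model` and `encoder_attention_heads`, so generic Transformers utilities can
# use the common attribute names.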
| 324 | 0 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that placing ``n`` at (row, column) violates no row, column or box rule."""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place with backtracking; return the solved grid or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell , end=' ' )
        print()
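# Added sketch: a self-contained validity check for a completed grid (every row,
# column and 3x3 box must contain the digits 1..9 exactly once).
def is_solved(grid: Matrix) -> bool:
    expected = set(range(1, 10))
    rows_ok = all(set(row) == expected for row in grid)
    cols_ok = all({grid[r][c] for r in range(9)} == expected for c in range(9))
    boxes_ok = all(
        {grid[br + r][bc + c] for r in range(3) for c in range(3)} == expected
        for br in (0, 3, 6)
        for bc in (0, 3, 6)
    )
    return rows_ok and cols_ok and boxes_ok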
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 77 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared( ciphertext : str, cipher_alphabet : list[str] | None = None, frequencies_dict : dict[str, float] | None = None, case_sensitive : bool = False, ) -> tuple[int, float, str]:
    """Brute-force every Caesar shift and rank candidates by chi-squared fit to English letter frequencies."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.0_8_4_9_7,
'''b''': 0.0_1_4_9_2,
'''c''': 0.0_2_2_0_2,
'''d''': 0.0_4_2_5_3,
'''e''': 0.1_1_1_6_2,
'''f''': 0.0_2_2_2_8,
'''g''': 0.0_2_0_1_5,
'''h''': 0.0_6_0_9_4,
'''i''': 0.0_7_5_4_6,
'''j''': 0.0_0_1_5_3,
'''k''': 0.0_1_2_9_2,
'''l''': 0.0_4_0_2_5,
'''m''': 0.0_2_4_0_6,
'''n''': 0.0_6_7_4_9,
'''o''': 0.0_7_5_0_7,
'''p''': 0.0_1_9_2_9,
'''q''': 0.0_0_0_9_5,
'''r''': 0.0_7_5_8_7,
'''s''': 0.0_6_3_2_7,
'''t''': 0.0_9_3_5_6,
'''u''': 0.0_2_7_5_8,
'''v''': 0.0_0_9_7_8,
'''w''': 0.0_2_5_6_0,
'''x''': 0.0_0_1_5_0,
'''y''': 0.0_1_9_9_4,
'''z''': 0.0_0_0_7_7,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key : int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key, )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
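# Added sketch: the per-shift scoring above, isolated as one helper. It mirrors
# the accumulation used there, including its use of
# `frequencies[letter] * occurrences` as the expected count.
def _chi_squared_for_text(text: str, frequencies: dict) -> float:
    total = 0.0
    for letter in text:
        if letter in frequencies:
            occurrences = text.count(letter)
            expected = frequencies[letter] * occurrences
            total += ((occurrences - expected) ** 2) / expected
    return total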
| 324 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-50-one-to-many-mmt': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class MBart50Tokenizer( PreTrainedTokenizer ):
'''simple docstring'''
lowerCAmelCase : int = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask']
lowerCAmelCase : List[int] = []
lowerCAmelCase : List[int] = []
def __init__( self : Any ,_snake_case : Union[str, Any] ,_snake_case : Union[str, Any]=None ,_snake_case : List[Any]=None ,_snake_case : List[str]="</s>" ,_snake_case : List[str]="</s>" ,_snake_case : str="<s>" ,_snake_case : int="<unk>" ,_snake_case : List[Any]="<pad>" ,_snake_case : Union[str, Any]="<mask>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : Any ,) -> None:
"""simple docstring"""
lowercase__ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
lowercase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase__ : Optional[int] = kwargs.get('''additional_special_tokens''' ,[] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase__ ,tgt_lang=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCAmelCase__ ,)
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
lowercase__ : Any = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Any = 1
lowercase__ : Optional[int] = len(self.sp_model )
lowercase__ : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
}
lowercase__ : str = {v: k for k, v in self.lang_code_to_id.items()}
lowercase__ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase__ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase__ : str = src_lang if src_lang is not None else '''en_XX'''
lowercase__ : List[str] = self.lang_code_to_id[self._src_lang]
lowercase__ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCAmelCase ( self : Tuple ,_snake_case : str ) -> None:
"""simple docstring"""
lowercase__ : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.__dict__.copy()
lowercase__ : Dict = None
return state
def __setstate__( self : List[str] ,_snake_case : Dict ) -> None:
"""simple docstring"""
lowercase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : int = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ : int = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : Any ,_snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase__ ,out_type=lowerCAmelCase__ )
def UpperCAmelCase ( self : Tuple ,_snake_case : str ) -> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : List[str] = self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : int ,_snake_case : int ) -> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : str ,_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[Any] = []
lowercase__ : str = ''''''
lowercase__ : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
lowercase__ : int = True
lowercase__ : Dict = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
lowercase__ : Optional[int] = False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : List[str] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ ,'''wb''' ) as fi:
lowercase__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
lowercase__ : Any = [1] * len(self.prefix_tokens )
lowercase__ : List[str] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase ( self : int ,_snake_case : Dict ,_snake_case : str ,_snake_case : Optional[str] ,_snake_case : Optional[str] ,**_snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase__ : Tuple = src_lang
lowercase__ : Dict = self(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_tensors=lowerCAmelCase__ ,**lowerCAmelCase__ )
lowercase__ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
lowercase__ : Tuple = tgt_lang_id
return inputs
def UpperCAmelCase ( self : int ,_snake_case : List[str] ,_snake_case : str = "en_XX" ,_snake_case : Optional[List[str]] = None ,_snake_case : str = "ro_RO" ,**_snake_case : List[str] ,) -> BatchEncoding:
"""simple docstring"""
lowercase__ : Dict = src_lang
lowercase__ : List[Any] = tgt_lang
        return super().prepare_seq2seq_batch(lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> None:
"""simple docstring"""
lowercase__ : Optional[Any] = self.lang_code_to_id[src_lang]
lowercase__ : Any = [self.cur_lang_code_id]
lowercase__ : Any = [self.eos_token_id]
def UpperCAmelCase ( self : Dict ,_snake_case : str ) -> None:
"""simple docstring"""
lowercase__ : Tuple = self.lang_code_to_id[tgt_lang]
lowercase__ : List[Any] = [self.cur_lang_code_id]
lowercase__ : Union[str, Any] = [self.eos_token_id]
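# Added sketch of the sequence layout set by the two methods above: mBART-50
# prepends the source/target language code and appends </s>.
def _mbart50_layout(token_ids: list, lang_code_id: int, eos_id: int = 2) -> list:
    return [lang_code_id] + list(token_ids) + [eos_id]
assert _mbart50_layout([10, 11], lang_code_id=250_004) == [250_004, 10, 11, 2]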
| 16 |
'''simple docstring'''
import math
def insertion_sort(array : list, start : int = 0, end : int = 0 ) -> list:
    """Sort ``array[start:end]`` in place with insertion sort and return the array."""
    end = end or len(array )
    for i in range(start, end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array : list, index : int, heap_size : int ) -> None:  # Max Heap
    """Sift ``array[index]`` down so the subtree rooted at ``index`` is a max heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index] , array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size )
def heap_sort(array : list ) -> list:
    """Sort the whole array in place with heapsort and return it."""
    n = len(array )
    for i in range(n // 2, -1, -1 ):
        heapify(array, i, n )
    for i in range(n - 1, 0, -1 ):
        array[0] , array[i] = array[i], array[0]
        heapify(array, 0, i )
    return array
def median_of_3(array : list, first_index : int, middle_index : int, last_index : int ) -> int:
    """Return the median of the three indexed values, used as the pivot."""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def partition(array : list, low : int, high : int, pivot : int ) -> int:
    """Hoare-style partition around ``pivot``; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i] , array[j] = array[j], array[i]
        i += 1
def sort(array : list ) -> list:
    """Introsort entry point: quicksort with a heapsort fallback and insertion sort for small runs."""
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array, 0, len(array ), size_threshold, max_depth )
def intro_sort(array : list, start : int, end : int, size_threshold : int, max_depth : int ) -> list:
    """Recursive introsort over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1 )
        p = partition(array, start, end, pivot )
        intro_sort(array, p, end, size_threshold, max_depth )
        end = p
    return insertion_sort(array, start, end )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
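# Added sanity-check sketch for the introsort above: the result must agree with
# Python's built-in sorted() on random inputs.
def _check_sort(trials: int = 10) -> None:
    import random
    for _ in range(trials):
        data = [random.randint(-100, 100) for _ in range(50)]
        assert sort(list(data)) == sorted(data)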
| 324 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path )-> None:
    """Load a TF BERT checkpoint into a PyTorch model and save its state dict."""
    config = BertConfig.from_json_file(bert_config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
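# Added invocation sketch (script and path names are placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bert_model.ckpt \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin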
| 39 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b ) -> bool:
    """Compare two TensorProtos for equal payloads while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ''''''
    b.name = ''''''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
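# Added note: the helper above temporarily blanks both `name` fields so that two
# initializers with identical payloads but different names compare equal via
# protobuf `==`, then restores the original names before returning.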
def _node_replace_input_with(node_proto, name, new_name ) -> None:
    """Rename every input of ``node_proto`` (and of its subgraphs) from ``name`` to ``new_name``."""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i, new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name )
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name )
def _graph_replace_input_with(graph_proto, name, new_name ) -> None:
    """Apply the input rename to every node in the graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name )
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace ) -> None:
    """Drop duplicate initializers and rewire their consumers to the kept copy."""
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # rewire every node that consumed the removed initializer
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref )
def remove_dup_initializers(onnx_file_path ) -> str:
    """Deduplicate identical initializers in an ONNX file and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder, model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''', dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('''total reduced size: ''', total_reduced_size / 1024 / 1024 / 1024, '''GB''' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model, model, ind_to_replace )
    optimized_model_file_name = '''optimized_''' + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name )
    onnx.save(model, new_model_path )
    return new_model_path
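# Added usage sketch (the path is a placeholder): writes `optimized_<name>` next
# to the input file and returns the new path.
#
#   optimized_path = remove_dup_initializers('''/path/to/model.onnx''')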
| 324 | 0 |
"""simple docstring"""
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool( PipelineTool ):
_snake_case : Optional[int] = 'facebook/bart-large-mnli'
_snake_case : Optional[Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
_snake_case : List[Any] = 'text_classifier'
_snake_case : Optional[int] = AutoTokenizer
_snake_case : str = AutoModelForSequenceClassification
_snake_case : Dict = ['text', ['text']]
_snake_case : List[str] = ['text']
def lowerCAmelCase_ ( self : Any ):
super().setup()
_UpperCAmelCase = self.model.config
_UpperCAmelCase = -1
        for idx, label in config.id2label.items():
if label.lower().startswith("""entail""" ):
_UpperCAmelCase = int(lowerCAmelCase__ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = labels
return self.pre_processor(
[text] * len(lowerCAmelCase__ ) , [f'''This example is {label}''' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
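# Added sketch of the zero-shot decision rule implemented above (pure logic, no
# model): every label becomes the NLI hypothesis "This example is <label>", and
# the label whose entailment logit is highest wins.
def _pick_label(entailment_logits: list, labels: list) -> str:
    best = max(range(len(labels)), key=lambda i: entailment_logits[i])
    return labels[best]
assert _pick_label([0.1, 2.3, -0.5], ["sports", "politics", "tech"]) == "politics"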
| 289 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[Any] = MBartTokenizer
_snake_case : Tuple = MBartTokenizerFast
_snake_case : List[str] = True
_snake_case : Optional[Any] = True
def snake_case__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest( unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = 'facebook/mbart-large-en-ro'
_snake_case : Dict = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_snake_case : List[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_snake_case : Union[str, Any] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def snake_case__ ( cls : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_UpperCamelCase = 1
return cls
def snake_case__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def snake_case__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
_UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
_UpperCamelCase = 10
_UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] )
def snake_case__ ( self : int ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def snake_case__ ( self : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def snake_case__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
_UpperCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
_UpperCamelCase = targets['''input_ids''']
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 250004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
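

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the assertions
# above pin down MBart's token layout -- source sequences end with
# [eos, src_lang_code], labels end with [eos, tgt_lang_code], and decoder
# inputs are the labels rotated right so the target language code comes
# first. A minimal stand-alone model of that layout; all ids below are made
# up for illustration, not real vocabulary entries.
def build_translation_pair(src_ids, tgt_ids, src_code, tgt_code, eos_id=2):
    """Return (encoder_input, decoder_input, labels) with MBart-style codes."""
    encoder_input = list(src_ids) + [eos_id, src_code]
    labels = list(tgt_ids) + [eos_id, tgt_code]
    # shift_tokens_right-style rotation: the trailing language code becomes
    # the first decoder input token.
    decoder_input = [labels[-1]] + labels[:-1]
    return encoder_input, decoder_input, labels


enc, dec, labels = build_translation_pair([62, 3034], [884, 9019], 250004, 250020)
assert enc[-2:] == [2, 250004]      # [eos, EN_CODE], as the batch test checks
assert dec[0] == 250020             # decoder input starts with RO_CODE
assert labels[-2:] == [2, 250020]   # labels end with [eos, RO_CODE]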
| 324 | 0 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowercase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 50257 , _UpperCAmelCase = 1024 , _UpperCAmelCase = 768 , _UpperCAmelCase = 12 , _UpperCAmelCase = 12 , _UpperCAmelCase = None , _UpperCAmelCase = "gelu_new" , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 1e-5 , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = False , ):
super().__init__()
__a : Dict = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
__a : str = prefix_inner_dim
__a : List[Any] = prefix_hidden_dim
__a : Any = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
__a : Optional[int] = (
nn.Linear(self.prefix_hidden_dim , lowerCAmelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
__a : str = GPTaConfig(
vocab_size=lowerCAmelCase__ , n_positions=lowerCAmelCase__ , n_embd=lowerCAmelCase__ , n_layer=lowerCAmelCase__ , n_head=lowerCAmelCase__ , n_inner=lowerCAmelCase__ , activation_function=lowerCAmelCase__ , resid_pdrop=lowerCAmelCase__ , embd_pdrop=lowerCAmelCase__ , attn_pdrop=lowerCAmelCase__ , layer_norm_epsilon=lowerCAmelCase__ , initializer_range=lowerCAmelCase__ , scale_attn_weights=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , scale_attn_by_inverse_layer_idx=lowerCAmelCase__ , reorder_and_upcast_attn=lowerCAmelCase__ , )
__a : str = GPTaLMHeadModel(lowerCAmelCase__ )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
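        # Project the (e.g. CLIP) prefix into GPT-2's embedding space and
        # prepend it to the token embeddings before the transformer pass.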
__a : Any = self.transformer.transformer.wte(lowerCAmelCase__ )
__a : List[str] = self.encode_prefix(lowerCAmelCase__ )
__a : Any = self.decode_prefix(lowerCAmelCase__ )
__a : List[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
__a : str = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
__a : Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
__a : Optional[Any] = self.transformer(inputs_embeds=lowerCAmelCase__ , labels=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
return torch.zeros(lowerCAmelCase__ , self.prefix_length , dtype=torch.intaa , device=lowerCAmelCase__ )
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.encode_prefix(lowerCAmelCase__ )
@torch.no_grad()
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : str = torch.split(lowerCAmelCase__ , 1 , dim=0 )
__a : Any = []
__a : List[Any] = []
for feature in features:
            __a : Any = self.decode_prefix(feature.to(lowerCAmelCase__ ) )  # back to the CLIP feature
            # Only beam search is supported for now
__a , __a : int = self.generate_beam(
input_embeds=lowerCAmelCase__ , device=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__a : int = torch.stack(lowerCAmelCase__ )
__a : int = torch.stack(lowerCAmelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = 5 , _UpperCAmelCase = 67 , _UpperCAmelCase = 1.0 , _UpperCAmelCase = None , ):
__a : Tuple = eos_token_id
__a : Union[str, Any] = None
__a : List[str] = None
__a : Dict = torch.ones(lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=torch.int )
__a : Dict = torch.zeros(lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=torch.bool )
if input_embeds is not None:
__a : Any = input_embeds
else:
__a : Union[str, Any] = self.transformer.transformer.wte(lowerCAmelCase__ )
for i in range(lowerCAmelCase__ ):
__a : int = self.transformer(inputs_embeds=lowerCAmelCase__ )
__a : List[Any] = outputs.logits
__a : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__a : Union[str, Any] = logits.softmax(-1 ).log()
if scores is None:
__a , __a : List[str] = logits.topk(lowerCAmelCase__ , -1 )
__a : List[str] = generated.expand(lowerCAmelCase__ , *generated.shape[1:] )
__a , __a : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
__a : Optional[int] = next_tokens
else:
__a : str = tokens.expand(lowerCAmelCase__ , *tokens.shape[1:] )
__a : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
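                # For beams that already emitted EOS: the step log-probs are
                # pinned (-inf except one zero-cost slot) so the beam's score
                # carries forward unchanged.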
__a : int = -float(np.inf )
__a : Dict = 0
__a : Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__a : List[str] = scores_sum / seq_lengths[:, None]
__a , __a : Dict = scores_sum_average.view(-1 ).topk(lowerCAmelCase__ , -1 )
__a : List[Any] = next_tokens // scores_sum.shape[1]
__a : List[Any] = seq_lengths[next_tokens_source]
__a : str = next_tokens % scores_sum.shape[1]
__a : Tuple = next_tokens.unsqueeze(1 )
__a : str = tokens[next_tokens_source]
__a : Tuple = torch.cat((tokens, next_tokens) , dim=1 )
__a : List[str] = generated[next_tokens_source]
__a : Dict = scores_sum_average * seq_lengths
__a : Dict = is_stopped[next_tokens_source]
__a : List[str] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
__a : Optional[Any] = torch.cat((generated, next_token_embed) , dim=1 )
__a : Any = is_stopped + next_tokens.eq(lowerCAmelCase__ ).squeeze()
if is_stopped.all():
break
__a : Optional[int] = scores / seq_lengths
__a : str = scores.argsort(descending=lowerCAmelCase__ )
# tokens tensors are already padded to max_seq_length
__a : Any = [tokens[i] for i in order]
__a : Optional[int] = torch.stack(lowerCAmelCase__ , dim=0 )
__a : int = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
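

# ---------------------------------------------------------------------------
# Illustrative sketch of the length-normalized beam selection performed in
# generate_beam above: accumulated log-probs are divided by sequence length
# before the top-k pick. Numpy-only, with made-up scores; this models the
# idea, not the module's exact code path.
import numpy as np


def rank_beams(scores_sum, seq_lengths, step_logprobs, beam_size):
    """Pick the top (beam, token) pairs by average log-probability."""
    candidates = scores_sum[:, None] + step_logprobs        # (beams, vocab)
    averaged = candidates / (seq_lengths[:, None] + 1)      # length-normalize
    top = np.argsort(averaged.reshape(-1))[::-1][:beam_size]
    vocab = step_logprobs.shape[1]
    return top // vocab, top % vocab                        # beam idx, token idx


beams, tokens = rank_beams(np.array([-1.0, -2.5]), np.array([3, 3]),
                           np.log([[0.7, 0.3], [0.6, 0.4]]), beam_size=2)
print(beams, tokens)  # [0 0] [0 1]: both survivors extend the stronger beam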
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowercase__ : str = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = ['pixel_values']
def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 256}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase__ ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(lowerCAmelCase__ ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
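

# ---------------------------------------------------------------------------
# Illustrative sketch of the last two preprocessing steps composed above
# (do_rescale, then do_normalize), written with plain numpy. It assumes the
# "standard" ImageNet statistics the defaults above resolve to are 0.5 per
# channel; treat that as an assumption, not a guarantee.
import numpy as np

IMAGENET_STANDARD_MEAN = np.array([0.5, 0.5, 0.5])
IMAGENET_STANDARD_STD = np.array([0.5, 0.5, 0.5])


def rescale_and_normalize(image_uint8):
    """Map uint8 HWC pixels to standardized float channels."""
    rescaled = image_uint8.astype(np.float32) * (1 / 255)  # do_rescale step
    return (rescaled - IMAGENET_STANDARD_MEAN) / IMAGENET_STANDARD_STD  # do_normalize step


out = rescale_and_normalize(np.full((224, 224, 3), 128, dtype=np.uint8))
print(out.shape, out.mean(axis=(0, 1)).round(3))  # (224, 224, 3), near-zero means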
| 324 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : Tuple = 'deformable_detr'
_A : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self: Optional[Any] , snake_case: str=True , snake_case: List[str]=None , snake_case: Dict=3 , snake_case: List[str]=300 , snake_case: Union[str, Any]=1_024 , snake_case: Tuple=6 , snake_case: Union[str, Any]=1_024 , snake_case: List[Any]=8 , snake_case: List[Any]=6 , snake_case: Tuple=1_024 , snake_case: List[Any]=8 , snake_case: Union[str, Any]=0.0 , snake_case: Tuple=True , snake_case: Any="relu" , snake_case: int=256 , snake_case: Dict=0.1 , snake_case: Tuple=0.0 , snake_case: str=0.0 , snake_case: int=0.0_2 , snake_case: Any=1.0 , snake_case: Optional[Any]=True , snake_case: int=False , snake_case: str="sine" , snake_case: List[Any]="resnet50" , snake_case: str=True , snake_case: str=False , snake_case: List[str]=4 , snake_case: List[str]=4 , snake_case: Optional[Any]=4 , snake_case: Optional[Any]=False , snake_case: Optional[int]=300 , snake_case: int=False , snake_case: Optional[Any]=1 , snake_case: Dict=5 , snake_case: int=2 , snake_case: Tuple=1 , snake_case: Optional[Any]=1 , snake_case: Optional[int]=5 , snake_case: Dict=2 , snake_case: int=0.1 , snake_case: int=0.2_5 , snake_case: Any=False , **snake_case: Optional[Any] , ) -> str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can\'t specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ :Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ :Optional[int] = backbone_config.get("""model_type""" )
snake_case_ :Dict = CONFIG_MAPPING[backbone_model_type]
snake_case_ :List[Any] = config_class.from_dict(lowerCAmelCase__ )
snake_case_ :List[str] = use_timm_backbone
snake_case_ :Dict = backbone_config
snake_case_ :Dict = num_channels
snake_case_ :Any = num_queries
snake_case_ :Dict = max_position_embeddings
snake_case_ :Union[str, Any] = d_model
snake_case_ :List[Any] = encoder_ffn_dim
snake_case_ :List[Any] = encoder_layers
snake_case_ :Optional[int] = encoder_attention_heads
snake_case_ :List[Any] = decoder_ffn_dim
snake_case_ :Optional[Any] = decoder_layers
snake_case_ :str = decoder_attention_heads
snake_case_ :str = dropout
snake_case_ :Any = attention_dropout
snake_case_ :Dict = activation_dropout
snake_case_ :Optional[int] = activation_function
snake_case_ :Union[str, Any] = init_std
snake_case_ :str = init_xavier_std
snake_case_ :List[str] = encoder_layerdrop
snake_case_ :Optional[Any] = auxiliary_loss
snake_case_ :int = position_embedding_type
snake_case_ :Union[str, Any] = backbone
snake_case_ :int = use_pretrained_backbone
snake_case_ :Optional[Any] = dilation
# deformable attributes
snake_case_ :str = num_feature_levels
snake_case_ :Optional[Any] = encoder_n_points
snake_case_ :str = decoder_n_points
snake_case_ :Tuple = two_stage
snake_case_ :Union[str, Any] = two_stage_num_proposals
snake_case_ :str = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
snake_case_ :List[str] = class_cost
snake_case_ :Dict = bbox_cost
snake_case_ :Optional[Any] = giou_cost
# Loss coefficients
snake_case_ :str = mask_loss_coefficient
snake_case_ :Optional[int] = dice_loss_coefficient
snake_case_ :List[str] = bbox_loss_coefficient
snake_case_ :List[str] = giou_loss_coefficient
snake_case_ :str = eos_coefficient
snake_case_ :Optional[Any] = focal_alpha
snake_case_ :int = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def lowerCAmelCase_ ( self: List[str] ) -> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase_ ( self: int ) -> int:
return self.d_model
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[int]:
snake_case_ :List[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
snake_case_ :Union[str, Any] = self.backbone_config.to_dict()
snake_case_ :Optional[Any] = self.__class__.model_type
return output
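

# ---------------------------------------------------------------------------
# Illustrative sketch of the serialization pattern the config above uses:
# deep-copy __dict__, recursively serialize the nested backbone config, and
# record the model type. TinyConfig is a hypothetical stand-in, not a
# transformers class.
import copy


class TinyConfig:
    model_type = "tiny"

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if getattr(self, "backbone_config", None) is not None:
            # Nested configs become plain dicts so the result is JSON-able.
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


cfg = TinyConfig(d_model=256, backbone_config=TinyConfig(depth=50))
assert cfg.to_dict()["backbone_config"]["depth"] == 50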
| 66 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : jnp.ndarray
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_snake_case : int = 3_2
_snake_case : int = 4
_snake_case : int = 4
_snake_case : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_snake_case : Union[bool, Tuple[bool]] = False
_snake_case : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_snake_case : int = 2
_snake_case : Union[int, Tuple[int]] = 8
_snake_case : Optional[Union[int, Tuple[int]]] = None
_snake_case : int = 1_2_8_0
_snake_case : float = 0.0
_snake_case : bool = False
_snake_case : jnp.dtype = jnp.floataa
_snake_case : bool = True
_snake_case : int = 0
_snake_case : bool = False
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : jax.random.KeyArray ) -> FrozenDict:
'''simple docstring'''
_UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
_UpperCamelCase = jnp.zeros(lowerCAmelCase__ , dtype=jnp.floataa )
_UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa )
_UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_UpperCamelCase , _UpperCamelCase = jax.random.split(lowerCAmelCase__ )
_UpperCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["params"]
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.block_out_channels
_UpperCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim
# input
_UpperCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype )
_UpperCamelCase = self.only_cross_attention
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_UpperCamelCase = []
_UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = block_out_channels[i]
_UpperCamelCase = i == len(lowerCAmelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_UpperCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCAmelCase__ )
_UpperCamelCase = down_blocks
# mid
_UpperCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_UpperCamelCase = []
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = reversed_block_out_channels[i]
_UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )]
_UpperCamelCase = i == len(lowerCAmelCase__ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_UpperCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowerCAmelCase__ )
_UpperCamelCase = output_channel
_UpperCamelCase = up_blocks
# out
_UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
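        # 1. time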
if not isinstance(lowerCAmelCase__ , jnp.ndarray ):
_UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps.astype(dtype=jnp.floataa )
_UpperCamelCase = jnp.expand_dims(lowerCAmelCase__ , 0 )
_UpperCamelCase = self.time_proj(lowerCAmelCase__ )
_UpperCamelCase = self.time_embedding(lowerCAmelCase__ )
# 2. pre-process
_UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) )
_UpperCamelCase = self.conv_in(lowerCAmelCase__ )
# 3. down
_UpperCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
else:
_UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_UpperCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowerCAmelCase__ , lowerCAmelCase__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_UpperCamelCase = new_down_block_res_samples
# 4. mid
_UpperCamelCase = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
_UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = up_block(
lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , )
else:
_UpperCamelCase = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train )
# 6. post-process
_UpperCamelCase = self.conv_norm_out(lowerCAmelCase__ )
_UpperCamelCase = nn.silu(lowerCAmelCase__ )
_UpperCamelCase = self.conv_out(lowerCAmelCase__ )
_UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
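

# ---------------------------------------------------------------------------
# Illustrative sketch of the sinusoidal timestep projection that feeds the
# time embedding above. The log-spaced frequency layout here is a common
# convention and an assumption about the internals, not the module's exact
# code.
import numpy as np


def sinusoidal_timestep_embedding(timesteps, dim, max_period=10_000):
    """Return (batch, dim) features: sin/cos at log-spaced frequencies."""
    half = dim // 2
    freqs = np.exp(-np.log(max_period) * np.arange(half) / half)
    args = np.asarray(timesteps, dtype=np.float64)[:, None] * freqs[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=-1)


emb = sinusoidal_timestep_embedding([0, 10, 999], dim=320)
print(emb.shape)  # (3, 320) -- 320 matches block_out_channels[0] above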
| 324 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 'longformer'
def __init__( self : Tuple, __A : Union[List[int], int] = 5_1_2, __A : int = 2, __A : int = 1, __A : int = 0, __A : int = 2, __A : int = 3_0_5_2_2, __A : int = 7_6_8, __A : int = 1_2, __A : int = 1_2, __A : int = 3_0_7_2, __A : str = "gelu", __A : float = 0.1, __A : float = 0.1, __A : int = 5_1_2, __A : int = 2, __A : float = 0.0_2, __A : float = 1E-12, __A : bool = False, **__A : Tuple, ):
super().__init__(pad_token_id=lowerCAmelCase__, **lowerCAmelCase__ )
UpperCAmelCase : List[Any] = attention_window
UpperCAmelCase : int = sep_token_id
UpperCAmelCase : int = bos_token_id
UpperCAmelCase : str = eos_token_id
UpperCAmelCase : Tuple = vocab_size
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : Tuple = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : List[Any] = onnx_export
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : int, __A : "PretrainedConfig", __A : str = "default", __A : "List[PatchingSpec]" = None ):
super().__init__(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : Union[str, Any] = True
@property
def __magic_name__ ( self : List[Any] ):
if self.task == "multiple-choice":
UpperCAmelCase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Optional[int] = super().outputs
if self.task == "default":
UpperCAmelCase : Dict = {0: '''batch'''}
return outputs
@property
def __magic_name__ ( self : Any ):
return 1E-4
@property
def __magic_name__ ( self : str ):
return max(super().default_onnx_opset, 1_4 )
def __magic_name__ ( self : Optional[int], __A : "PreTrainedTokenizerBase", __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
UpperCAmelCase : int = super().generate_dummy_inputs(
preprocessor=lowerCAmelCase__, batch_size=lowerCAmelCase__, seq_length=lowerCAmelCase__, is_pair=lowerCAmelCase__, framework=lowerCAmelCase__ )
import torch
        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
UpperCAmelCase : Dict = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
UpperCAmelCase : str = 1
return inputs
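

# ---------------------------------------------------------------------------
# Illustrative sketch of the global-attention mask built in the dummy-input
# hook above ("make every second token global"), using numpy instead of
# torch:
import numpy as np


def every_second_token_global(input_ids):
    """Return a 0/1 mask with ones at even positions (global attention)."""
    mask = np.zeros_like(input_ids)
    mask[:, ::2] = 1
    return mask


print(every_second_token_global(np.ones((2, 8), dtype=np.int64))[0])
# [1 0 1 0 1 0 1 0]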
| 336 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowercase__ : List[str] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Dict = logging.getLogger()
def a__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def a__ ( lowercase : Tuple, lowercase : Dict="eval" ) -> int:
"""simple docstring"""
_UpperCamelCase = os.path.join(lowercase, F"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
return json.load(lowercase )
raise ValueError(F"""can't find {path}""" )
lowercase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def snake_case__ ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_flax_glue.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def snake_case__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_clm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def snake_case__ ( self : Tuple ) -> str:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_summarization_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def snake_case__ ( self : Tuple ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_mlm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def snake_case__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_ta_mlm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_flax_ner.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_qa.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
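

# ---------------------------------------------------------------------------
# Illustrative sketch of the driver pattern every test above relies on:
# patch sys.argv with the script's CLI flags, then call its main(). The
# main() below is a stand-in, not one of the example scripts.
import sys
from unittest.mock import patch


def main():
    print("args:", sys.argv[1:])


testargs = ["run_example.py", "--output_dir", "/tmp/out", "--seed", "42"]
with patch.object(sys, "argv", testargs):
    main()  # the script sees only the injected arguments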
| 324 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=4 , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = parent
lowerCAmelCase : int = batch_size
lowerCAmelCase : Optional[Any] = seq_length
lowerCAmelCase : List[str] = is_training
lowerCAmelCase : Union[str, Any] = use_attention_mask
lowerCAmelCase : Dict = use_token_type_ids
lowerCAmelCase : int = use_labels
lowerCAmelCase : Any = vocab_size
lowerCAmelCase : List[str] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Optional[Any] = hidden_act
lowerCAmelCase : List[Any] = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Union[str, Any] = type_vocab_size
lowerCAmelCase : Union[str, Any] = type_sequence_label_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Union[str, Any] = num_choices
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[Any] = None
if self.use_attention_mask:
lowerCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : str = None
if self.use_token_type_ids:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Tuple = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = config_and_inputs
lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Dict =(
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = FlaxAlbertModelTester(self )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class_name.from_pretrained("albert-base-v2" )
lowerCAmelCase : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase__ )
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = FlaxAlbertModel.from_pretrained("albert-base-v2" )
lowerCAmelCase : str = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
lowerCAmelCase : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
lowerCAmelCase : Any = (1, 11, 768)
self.assertEqual(output.shape , lowerCAmelCase__ )
lowerCAmelCase : List[Any] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
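

# ---------------------------------------------------------------------------
# Illustrative sketch of the integration-test pattern used above: compare a
# small slice of the model output against pinned reference values within a
# tolerance. The tensor here is random stand-in data, not model output.
import numpy as np

output = np.random.RandomState(0).randn(1, 11, 768)  # stand-in for model output
expected_slice = output[:, 1:4, 1:4].copy()          # would be hard-coded values
assert output.shape == (1, 11, 768)
assert np.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)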
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Optional[Any] = logging.getLogger()
def a__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def a__ ( lowercase : Dict ) -> int:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = os.path.join(lowercase, '''all_results.json''' )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
_UpperCamelCase = json.load(lowercase )
else:
raise ValueError(F"""can't find {path}""" )
return results
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowercase__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@classmethod
def snake_case__ ( cls : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Tuple ) -> int:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : int ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
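# For context (added note): `get_results` above is assumed to load the
# `all_results.json` file that each example script writes into its output
# directory. A minimal sketch of that helper, under this assumption:
#
#     def get_results(output_dir):
#         with open(os.path.join(output_dir, "all_results.json")) as f:
#             return json.load(f)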
| 324 | 0 |
"""simple docstring"""
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
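# Illustrative note (added): after the attribute assignments above, legacy
# access paths keep resolving to the current top-level objects, e.g.:
#
#     import datasets
#     assert datasets.utils.DownloadConfig is datasets.DownloadConfig  # hypothetical sanity check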
| 44 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by up-casing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably in Playfair, so the alphabet omits J
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
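
# Minimal usage sketch (added for illustration; not part of the original module):
if __name__ == "__main__":
    demo_key = "encrypt"  # hypothetical key chosen for the demo
    secret = encode("hide the gold", demo_key)
    print(f"encoded: {secret}")
    print(f"decoded: {decode(secret, demo_key)}")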
| 324 | 0 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A palindrome permutation exists iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
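
# Quick sanity check (added for illustration): the two implementations above
# should agree; "Momo" has only even character counts, "abc" has three odd ones.
assert can_string_be_rearranged_as_palindrome_counter("Momo") is True
assert can_string_be_rearranged_as_palindrome("Momo") is True
assert can_string_be_rearranged_as_palindrome_counter("abc") is False
assert can_string_be_rearranged_as_palindrome("abc") is False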
def benchmark(input_str: str = "") -> None:
    """Benchmark code comparing the two implementations."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()", "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()", "\tans =",
        can_string_be_rearranged_as_palindrome(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 185 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 40_96,
'google/bigbird-roberta-large': 40_96,
'google/bigbird-base-trivia-itc': 40_96,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """Construct a BigBird tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>",
        sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sep_token=sep_token, mask_token=mask_token, cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self) -> int:
return self.sp_model.get_piece_size()
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file )
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
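
# Illustrative usage sketch (added; fetching a real checkpoint requires network
# access, so it is left as a comment):
#
#     tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     ids = tokenizer("Paris is the [MASK] of France.").input_ids
#     print(tokenizer.decode(ids))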
| 324 | 0 |
def text_justification(word: str, max_width: int) -> list:
    """Distribute the words of `word` into fully justified lines of `max_width` characters."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
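
# Worked example (added for illustration), fully justified to width 16:
assert text_justification("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]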
if __name__ == "__main__":
from doctest import testmod
testmod()
| 127 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    """Configuration class for the Audio Spectrogram Transformer (AST) model."""

    model_type = "audio-spectrogram-transformer"
    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10,
        max_length=1024, num_mel_bins=128, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 324 | 0 |
"""simple docstring"""
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in `n` that have the greatest product."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
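
# Equivalent loop-based sketch (added for illustration): the same 13-digit
# sliding-window product without `reduce`, which can be easier to follow.
def solution_iterative(n: str = N) -> int:
    best = 0
    for i in range(len(n) - 12):
        product = 1
        for digit in n[i : i + 13]:
            product *= int(digit)
        best = max(best, product)
    return best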
if __name__ == "__main__":
print(f'''{solution() = }''')
| 77 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
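
# Shape note (added for illustration): with padding=kernel_size // 2, the
# spatial size is preserved for stride 1 and halved for stride 2, e.g.
#
#     layer = ResNetConvLayer(3, 64, kernel_size=7, stride=2)
#     layer(torch.randn(1, 3, 224, 224)).shape  # torch.Size([1, 64, 112, 112])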
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """ResNet shortcut, used to project residual features to the correct size and, if needed, downsample the input."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two `3x3` convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer. The first `1x1` convolution reduces the input by a factor of `reduction` to
    make the middle `3x3` convolution faster; the last `1x1` convolution remaps the features to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
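
# Note (added for illustration): with the default reduction=4, a bottleneck
# going 256 -> 256 channels computes through 256 // 4 = 64 internal channels:
# 1x1 (256 -> 64), 3x3 (64 -> 64), then 1x1 back up (64 -> 256).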
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """An abstract class to handle weights initialization and a simple interface for downloading pretrained models."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
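
# Illustrative usage sketch (added; the config values below are assumptions
# for the demo, not part of this file):
#
#     config = ResNetConfig(out_features=["stage2", "stage4"])
#     backbone = ResNetBackbone(config)
#     outputs = backbone(torch.randn(1, 3, 224, 224))
#     print([fmap.shape for fmap in outputs.feature_maps])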
| 324 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowercase__ : List[str] = str(bin(__lowerCamelCase ) )[2:] # remove the leading "0b"
lowercase__ : Tuple = str(bin(__lowerCamelCase ) )[2:] # remove the leading "0b"
lowercase__ : int = max(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(__lowerCamelCase ) , b_binary.zfill(__lowerCamelCase ) ) )
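
# Example (added for illustration): 25 is 0b11001 and 32 is 0b100000, so no
# aligned bit positions share a 1.
assert binary_and(25, 32) == "0b000000"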
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __lowerCAmelCase :
"""simple docstring"""
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCamelCase = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
_UpperCamelCase = inputs_dict
_UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_UpperCamelCase = pt_model(**lowerCAmelCase__ ).to_tuple()
_UpperCamelCase = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
_UpperCamelCase = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
_UpperCamelCase = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4e-2 )
def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Any:
'''simple docstring'''
_UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
_UpperCamelCase = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def snake_case__ ( self : int ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase = config_inputs_dict.pop('''vision_config''' )
_UpperCamelCase = config_inputs_dict.pop('''text_config''' )
_UpperCamelCase = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
_UpperCamelCase = 13
_UpperCamelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_UpperCamelCase = random_attention_mask([batch_size, 4] )
_UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaxViTModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def snake_case__ ( self : str ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = FlaxViTModelTester(self )
_UpperCamelCase = FlaxBertModelTester(self )
_UpperCamelCase = vit_model_tester.prepare_config_and_inputs()
_UpperCamelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase = vision_config_and_inputs
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        '''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        '''simple docstring'''
        vision_model = FlaxCLIPVisionModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        clip_model_tester = FlaxCLIPVisionModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference(self) -> Any:
        '''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        inputs = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''] , images=image , padding=True , return_tensors='''np''' )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 ) )
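# Pattern note (illustrative sketch, not part of the test suite; variable names
# here are assumptions): the save/load round-trip check above generalizes to any
# Flax model, e.g.
#     out_before = model(**inputs)[0]
#     model.save_pretrained(tmp_dir)
#     reloaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dir)
#     assert np.amax(np.abs(reloaded(**inputs)[0] - out_before)) <= 1e-5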
| 324 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    """simple docstring"""
    mode = 'summarization'
    loss_names = ['loss']
    metric_names = ROUGE_KEYS
    default_val_metric = 'rouge2'
    def __init__(self, hparams, **kwargs):
        """simple docstring"""
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
        super().__init__(hparams , num_labels=None , mode=self.mode , **kwargs )
        use_task_specific_params(self.model , 'summarization' )
        save_git_info(self.hparams.output_dir )
        self.metrics_save_path = Path(self.output_dir ) / 'metrics.json'
        self.hparams_save_path = Path(self.output_dir ) / 'hparams.pkl'
        pickle_save(self.hparams , self.hparams_save_path )
        self.step_count = 0
        self.metrics = defaultdict(list )
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
        self.dataset_kwargs = {
            'data_dir': self.hparams.data_dir,
            'max_source_length': self.hparams.max_source_length,
            'prefix': self.model.config.prefix or '',
        }
        n_observations_per_split = {
            'train': self.hparams.n_train,
            'val': self.hparams.n_val,
            'test': self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            'train': self.hparams.max_target_length,
            'val': self.hparams.val_max_target_length,
            'test': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
        assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        self.hparams.git_sha = get_git_info()['repo_sha']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , MBartTokenizer ):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch):
        """simple docstring"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch , Path(self.output_dir ) / 'text_batch.json' )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        """simple docstring"""
        return self.model(input_ids , **kwargs )
    def ids_to_clean_text(self, generated_ids):
        """simple docstring"""
        gen_text = self.tokenizer.batch_decode(
            generated_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True )
        return lmap(str.strip , gen_text )
    def _step(self, batch):
        """simple docstring"""
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch['input_ids'], batch['attention_mask']
        tgt_ids = batch['labels']
        if isinstance(self.model , TaForConditionalGeneration ):
            decoder_input_ids = self.model._shift_right(tgt_ids )
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids , pad_token_id )
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch['decoder_input_ids'] = decoder_input_ids
            self.save_readable_batch(batch )
        outputs = self(src_ids , attention_mask=src_mask , decoder_input_ids=decoder_input_ids , use_cache=False )
        lm_logits = outputs['logits']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id )
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            lprobs = nn.functional.log_softmax(lm_logits , dim=-1 )
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs , tgt_ids , self.hparams.label_smoothing , ignore_index=pad_token_id )
        return (loss,)
    @property
    def pad(self):
        """simple docstring"""
        return self.tokenizer.pad_token_id
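    # Explanatory sketch for _step above: shift_tokens_right turns labels
    # [y1, ..., yn] into decoder inputs [start, y1, ..., y_{n-1}] so that
    # position t is predicted only from tokens before t; T5-family models
    # instead use their own _shift_right, which prepends the pad token as
    # the decoder start symbol.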
    def training_step(self, batch, batch_idx):
        """simple docstring"""
        loss_tensors = self._step(batch )
        logs = dict(zip(self.loss_names , loss_tensors ) )
        # tokens per batch
        logs['tpb'] = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
        logs['bs'] = batch['input_ids'].shape[0]
        logs['src_pad_tok'] = batch['input_ids'].eq(self.pad ).sum()
        logs['src_pad_frac'] = batch['input_ids'].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx):
        """simple docstring"""
        return self._generative_step(batch )
    def validation_epoch_end(self, outputs, prefix="val"):
        """simple docstring"""
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        loss = losses['loss']
        generative_metrics = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val ).type_as(loss )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(generative_metrics )
        all_metrics = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
        all_metrics['step_count'] = self.step_count
        self.metrics[prefix].append(all_metrics )  # callback writes this to self.metrics_save_path
        preds = flatten_list([x['preds'] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            F"""{prefix}_loss""": loss,
            F"""{prefix}_{self.val_metric}""": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target):
        """simple docstring"""
        return calculate_rouge(preds , target )
    def _generative_step(self, batch):
        """simple docstring"""
        ta = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=True , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        gen_time = (time.time() - ta) / batch['input_ids'].shape[0]
        preds = self.ids_to_clean_text(generated_ids )
        target = self.ids_to_clean_text(batch['labels'] )
        loss_tensors = self._step(batch )
        base_metrics = dict(zip(self.loss_names , loss_tensors ) )
        rouge = self.calc_generative_metrics(preds , target )
        summ_len = np.mean(lmap(len , preds ) )
        base_metrics.update(gen_time=gen_time , gen_len=summ_len , preds=preds , target=target , **rouge )
        return base_metrics
    def test_step(self, batch, batch_idx):
        """simple docstring"""
        return self._generative_step(batch )
    def test_epoch_end(self, outputs):
        """simple docstring"""
        return self.validation_epoch_end(outputs , prefix='test' )
    def get_dataset(self, type_path):
        """simple docstring"""
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer , type_path=type_path , n_obs=n_obs , max_target_length=max_target_length , **self.dataset_kwargs , )
        return dataset
    def get_dataloader(self, type_path, batch_size, shuffle=False):
        """simple docstring"""
        dataset = self.get_dataset(type_path )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=False , num_workers=self.num_workers , sampler=sampler , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset , batch_sampler=batch_sampler , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=shuffle , num_workers=self.num_workers , sampler=None , )
    def train_dataloader(self):
        """simple docstring"""
        dataloader = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=True )
        return dataloader
    def val_dataloader(self):
        """simple docstring"""
        return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
    def test_dataloader(self):
        """simple docstring"""
        return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser , root_dir )
        add_generic_args(parser , root_dir )
        parser.add_argument(
            '--max_source_length' , default=1024 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--max_target_length' , default=56 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=142 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=142 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument('--freeze_encoder' , action='store_true' )
        parser.add_argument('--freeze_embeds' , action='store_true' )
        parser.add_argument('--sortish_sampler' , action='store_true' , default=False )
        parser.add_argument('--overwrite_output_dir' , action='store_true' , default=False )
        parser.add_argument('--max_tokens_per_batch' , type=int , default=None )
        parser.add_argument('--logger_name' , type=str , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
        parser.add_argument('--n_train' , type=int , default=-1 , required=False , help='# examples. -1 means use all.' )
        parser.add_argument('--n_val' , type=int , default=500 , required=False , help='# examples. -1 means use all.' )
        parser.add_argument('--n_test' , type=int , default=-1 , required=False , help='# examples. -1 means use all.' )
        parser.add_argument(
            '--task' , type=str , default='summarization' , required=False , help='# examples. -1 means use all.' )
        parser.add_argument('--label_smoothing' , type=float , default=0.0 , required=False )
        parser.add_argument('--src_lang' , type=str , default='' , required=False )
        parser.add_argument('--tgt_lang' , type=str , default='' , required=False )
        parser.add_argument('--eval_beams' , type=int , default=None , required=False )
        parser.add_argument(
            '--val_metric' , type=str , default=None , required=False , choices=['bleu', 'rouge2', 'loss', None] )
        parser.add_argument('--eval_max_gen_length' , type=int , default=None , help='never generate more than n tokens' )
        parser.add_argument('--save_top_k' , type=int , default=1 , required=False , help='How many checkpoints to save' )
        parser.add_argument(
            '--early_stopping_patience' , type=int , default=-1 , required=False , help=(
                '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will effect it.'
            ) , )
        return parser
class TranslationModule(SummarizationModule):
    """simple docstring"""
    mode = 'translation'
    loss_names = ['loss']
    metric_names = ['bleu']
    default_val_metric = 'bleu'
    def __init__(self, hparams, **kwargs):
        """simple docstring"""
        super().__init__(hparams , **kwargs )
        self.dataset_kwargs['src_lang'] = hparams.src_lang
        self.dataset_kwargs['tgt_lang'] = hparams.tgt_lang
    def calc_generative_metrics(self, preds, target):
        """simple docstring"""
        return calculate_bleu(preds , target )
def main(args, model=None) -> SummarizationModule:
    """simple docstring"""
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args )
        else:
            model = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('/tmp' )
        or str(args.output_dir ).startswith('/var' )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get('WANDB_PROJECT' , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == 'loss'
    trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
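# Hypothetical invocation sketch (paths and some flags are assumptions — the
# generic flags come from add_generic_args/BaseTransformer, not this file):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./sum_ckpt \
#       --model_name_or_path facebook/bart-large --gpus 1 --do_train --do_predict \
#       --max_source_length 1024 --max_target_length 56 --val_metric rouge2
# where data_dir holds line-aligned {train,val,test}.source / .target files.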
| 39 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxAlbertModelTester(self )
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
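# Minimal usage sketch outside the test harness (the checkpoint download is
# assumed to succeed; input values are illustrative):
#     model = FlaxAlbertModel.from_pretrained("albert-base-v2")
#     hidden = model(np.ones((1, 11), dtype="i4"))[0]  # shape (1, 11, 768)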
| 324 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
PegasusTokenizer = PegasusTokenizer if is_sentencepiece_available() else None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
    'tokenizer_file': {
        'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list )}, but is'''
                    f''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    """Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                """There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
                f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
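# Usage sketch (network access and the checkpoint's tokenizer files are
# assumptions):
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("PEGASUS uses gap-sentence pretraining.").input_ids
#     ids[-1] == tok.eos_token_id  # eos is appended by build_inputs_with_special_tokens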
| 289 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = LevitImageProcessingTester(self )
    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
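# Usage sketch mirroring the tests above (values follow the tester defaults):
#     processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#     pixel_values = processor(Image.new("RGB", (32, 32)), return_tensors="pt").pixel_values
#     # -> shape (1, 3, 18, 18): shortest edge resized to 18, then center-cropped to 18x18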
| 324 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'switch_transformers'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=32128 , d_model=768 , d_kv=64 , d_ff=2048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.0_1 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , router_z_loss_coef=0.0_0_1 , router_aux_loss_coef=0.0_0_1 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , ) | 160 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 324 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__a = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 66 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
    ['attention', 'attn'],
    ['encoder_attention', 'encoder_attn'],
    ['q_lin', 'q_proj'],
    ['k_lin', 'k_proj'],
    ['v_lin', 'v_proj'],
    ['out_lin', 'out_proj'],
    ['norm_embeddings', 'layernorm_embedding'],
    ['position_embeddings', 'embed_positions'],
    ['embeddings', 'embed_tokens'],
    ['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('''encoder''' ):
        k = k.replace('''.attn''', '''.self_attn''' )
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''final_layer_norm''' )
    elif k.startswith('''decoder''' ):
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''encoder_attn_layer_norm''' )
        k = k.replace('''norm3''', '''final_layer_norm''' )
    return k
def rename_layernorm_keys(sd):
    """simple docstring"""
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('''layernorm_embedding''', '''layer_norm''' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """simple docstring"""
    model = torch.load(checkpoint_path, map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
    parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
    parser.add_argument(
        '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
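# Hypothetical invocation (the script's file name and input paths are
# assumptions; the flags come from the argparse setup above):
#   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
#       --src_path blenderbot-model.bin --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json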
| 324 | 0 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum obtainable sum of non-adjacent elements of nums.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
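# Hand-checked usage sketch: an empty selection (sum 0) is always allowed,
# because max_excluding starts at 0, so all-negative inputs yield 0.
assert maximum_non_adjacent_sum([-1, -2, -3]) == 0
assert maximum_non_adjacent_sum([1, 2, 4, 9]) == 11  # pick 2 + 9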
| 336 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
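# Behavior sketch: with _LazyModule installed in sys.modules, attributes such as
# MCTCTConfig are resolved on first access rather than at package import time,
# while the TYPE_CHECKING branch above gives static type checkers the eager
# symbols.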
| 324 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto, name, new_name):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers(onnx_file_path):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: " , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("total reduced size: " , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , "GB" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder , new_model_file_name )
    onnx.save(model , new_model )
    return new_model
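# Usage sketch (the input file name is an assumption):
#     new_path = remove_dup_initializers("model.onnx")
# deduplicates identical initializer tensors, rewires node inputs to the
# surviving initializer names, writes "optimized_model.onnx" next to the input,
# prints the memory reclaimed, and returns the new path.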
| 108 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    """simple docstring"""
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """simple docstring"""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """simple docstring"""
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        start = div * index + min(index, mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F"""Error dividing inputs iterable among processes. """
            F"""Total number of objects {len(iterable )}, """
            F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
    logger.info(
        F"""Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds )
    logger.info(F"""Finished {num_proc} processes""" )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F"""Unpacked {len(mapped )} objects""" )
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """simple docstring"""
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """simple docstring"""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
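# Usage sketch (the mapped function and the single_map_nested_func helper are
# illustrative assumptions):
#     with parallel_backend("spark"):
#         results = parallel_map(fn, items, 2, (dict,), True, None, _single_map_nested)
# Without an active backend, parallel_map falls back to a multiprocessing Pool.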
| 324 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __A ( unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
    @property
    def dummy_extractor(self):
        def extract(*args , **kwargs ):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0] )
                def to(self, device):
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
assert image.shape == (1, 64, 64, 3)

@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance (sld_guidance_scale = 0); the built-in safety
        # checker blacks out the unsafe result, hence the all-zero slice below
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
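

# --- Usage sketch (editorial addition, not part of the original test module) ---
# The tests above drive StableDiffusionPipelineSafe through `sld_guidance_scale`:
# 0 disables safe latent diffusion, while larger values steer generation away
# from unsafe concepts. A minimal sketch of typical usage; it assumes a CUDA
# device and downloads the full v1-5 checkpoint, so it is illustrative only.
# The helper name `_demo_safe_latent_diffusion` is hypothetical.
def _demo_safe_latent_diffusion(prompt):
    pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
    pipe = pipe.to(torch_device)
    generator = torch.manual_seed(0)
    # the strong safety-guidance configuration asserted in the nightly tests above
    output = pipe(
        [prompt],
        generator=generator,
        guidance_scale=7,
        num_inference_steps=50,
        output_type="np",
        sld_guidance_scale=2000,
        sld_warmup_steps=7,
        sld_threshold=0.025,
        sld_momentum_scale=0.5,
        sld_mom_beta=0.7,
    )
    return output.images[0]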
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the
        image processor, assuming do_resize is True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
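

# --- Usage sketch (editorial addition, not part of the original test module) ---
# How the processor under test is used outside of a test: resize, rescale,
# normalize and pad a PIL image into a model-ready batch. Assumes the vision
# extras are installed and the repository fixture image exists; the shape in
# the comment mirrors the values asserted above. `_demo_image_processing` is a
# hypothetical helper name.
def _demo_image_processing():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    image_processing = DeformableDetrImageProcessor()
    encoding = image_processing(images=image, return_tensors="pt")
    print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])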
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
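
# --- Usage sketch (editorial addition, not part of the original module) ---
# These constants are composed elsewhere in diffusers to build hub URLs and
# cache paths. A minimal illustration with a hypothetical repo id:
if __name__ == "__main__":
    repo_id = "runwayml/stable-diffusion-v1-5"  # hypothetical example
    print(f"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{repo_id}/resolve/main/{WEIGHTS_NAME}")
    print(os.path.join(HF_MODULES_CACHE, DIFFUSERS_DYNAMIC_MODULE_NAME))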
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences by adding
        special tokens. A RemBERT sequence has the following format:
        single sequence: ``[CLS] X [SEP]``; pair: ``[CLS] A [SEP] B [SEP]``.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
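

# --- Usage sketch (editorial addition, not part of the original module) ---
# Illustrates the special-token layout produced by the helpers above:
# [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP] for a pair.
# The token ids are made up; only the layout matters. `_demo_special_tokens`
# is a hypothetical helper name.
def _demo_special_tokens(tokenizer):
    ids_a, ids_b = [11, 12], [21, 22, 23]
    single = tokenizer.build_inputs_with_special_tokens(ids_a)
    pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
    assert len(single) == len(ids_a) + 2  # [CLS] ... [SEP]
    assert len(pair) == len(ids_a) + len(ids_b) + 3  # [CLS] ... [SEP] ... [SEP]
    # token type ids: zeros for the first segment (incl. [CLS] and first [SEP])
    assert tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b) == [0, 0, 0, 0, 1, 1, 1, 1]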
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path, output_path):
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path, magic_number_length):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """
        Filter out tar members whose resolved paths or link targets would
        escape the extraction directory (mitigation for CVE-2007-4559).
        """

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path, output_path):
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path, output_path):
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path, output_path):
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path, output_path):
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
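

# --- Usage sketch (editorial addition, not part of the original module) ---
# End-to-end demonstration of the magic-number dispatch above: write a small
# gzip file, let `Extractor` infer its format from the b"\x1F\x8B" signature,
# then extract it. Uses only this module plus the standard library.
# `_demo_extractor` is a hypothetical helper name.
def _demo_extractor():
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        archive_path = os.path.join(tmp_dir, "payload.txt.gz")
        with gzip.open(archive_path, "wb") as f:
            f.write(b"hello")
        extractor_format = Extractor.infer_extractor_format(archive_path)
        assert extractor_format == "gzip"
        output_path = os.path.join(tmp_dir, "payload.txt")
        Extractor.extract(archive_path, output_path, extractor_format=extractor_format)
        with open(output_path, "rb") as f:
            assert f.read() == b"hello"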
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Any = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a plain Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
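

# --- Usage sketch (editorial addition, not part of the original module) ---
# Minimal round trip through the config defined above: instantiate with a few
# overrides (two_stage requires with_box_refine, see the check in __init__)
# and serialize back to a plain dict.
if __name__ == "__main__":
    config = DeformableDetrConfig(num_queries=100, two_stage=True, with_box_refine=True)
    assert config.hidden_size == config.d_model == 256  # via the property above
    serialized = config.to_dict()
    assert serialized["model_type"] == "deformable_detr"
    assert serialized["num_queries"] == 100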
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
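
# --- Editorial note (editorial addition, not part of the original module) ---
# The `_LazyModule` indirection above keeps importing this package cheap: the
# torch-backed classes are only materialized on first attribute access.
# A sketch of the effect, assuming transformers (with torch) is installed:
#
#     import transformers.models.altclip as altclip  # fast, nothing heavy loaded yet
#     model_cls = altclip.AltCLIPModel               # triggers the real import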
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
"""simple docstring"""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
                    expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
                    expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
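

# --- Usage sketch (editorial addition, not part of the original module) ---
# Encrypt a sentence with a known Caesar shift, then recover it with the
# chi-squared breaker above. `_demo_chi_squared_decrypt` is a hypothetical
# helper name.
def _demo_chi_squared_decrypt():
    plaintext = "defend the east wall of the castle"
    shift = 3
    alphabet = [chr(i) for i in range(97, 123)]
    ciphertext = "".join(
        alphabet[(alphabet.index(c) + shift) % 26] if c in alphabet else c for c in plaintext
    )
    best_shift, chi_value, decoded = decrypt_caesar_with_chi_squared(ciphertext)
    assert best_shift == shift and decoded == plaintext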
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def UpperCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
        model = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
@slow
def UpperCAmelCase ( self : str ) -> int:
"""simple docstring"""
        model = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(torch_device )
        image_processor = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' ,size=480 )
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='''pt''' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values ,interpolate_pos_encoding=True )
        # verify the logits
        expected_shape = torch.Size((1, 3_601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,expected_slice ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
        model = ViTModel.from_pretrained('''facebook/dino-vits8''' ,torch_dtype=torch.float16 ,device_map='''auto''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='''pt''' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values )
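        # Note: this is only a smoke test; with device_map='auto' accelerate may
        # shard the fp16 weights across devices, which is why the pixel values
        # are moved to the test device explicitly before the call.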
| 16 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place by insertion sort."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift array[index] down so the subtree rooted at index satisfies the max-heap property."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    """In-place heap sort, used as the fallback when recursion gets too deep."""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of the three candidate pivot values."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of array[low:high] around `pivot`; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Introsort entry point: quicksort that falls back to heap sort once the
    depth limit 2 * ceil(log2(n)) is hit, and to insertion sort below 16 elements."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
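# Quick illustrative check of the hybrid sort above (values chosen arbitrarily):
# sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
# -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]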
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 324 | 0 |
import math
class SelfOrganizingMap:
    """A minimal two-cluster self-organizing map (winner-take-all)."""

    def get_winner(self, weights, sample):
        """Return the index of the weight vector closest to the sample
        in squared Euclidean distance (the smaller distance wins)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 < d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move the winning weight vector j toward the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
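# Worked example (illustrative values): with alpha = 0.5, updating the weight
# vector [0.2, 0.6] toward the sample [1, 1] gives
# [0.2 + 0.5 * (1 - 0.2), 0.6 + 0.5 * (1 - 0.6)] = [0.6, 0.8].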
def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 39 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called `name` on this node (and its subgraphs) with `new_name`."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Replace an input name across every node in the graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and rewire their consumers to the kept copy."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    Load an ONNX model, merge duplicate initializers, and save the result next to
    the original with an ``optimized_`` prefix. Returns the new file path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / float64
                else:
                    print('''unexpected data type: ''', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('''total reduced size: ''', total_reduced_size / 1024 / 1024 / 1024, '''GB''')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model = '''optimized_''' + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
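# Minimal usage sketch (the file path below is hypothetical):
# optimized_path = remove_dup_initializers('exported/model.onnx')
# onnx.checker.check_model(onnx.load(optimized_path))  # sanity-check the result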
| 324 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( BaseImageProcessor ):
_snake_case : Union[str, Any] = ['pixel_values']
def __init__( self : Optional[Any] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Dict[str, int]] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 255 , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , **__lowerCAmelCase : Optional[Any] , ):
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase = size if size is not None else {"""shortest_edge""": 256}
_UpperCAmelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_UpperCAmelCase = get_size_dict(lowerCAmelCase__ , param_name="""crop_size""" )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Optional[Any] , ):
_UpperCAmelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_UpperCAmelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size["""shortest_edge"""] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Optional[Any] , ):
_UpperCAmelCase = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(lowerCAmelCase__ , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : float , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Tuple ):
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Any , ):
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : ImageInput , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[float] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowerCAmelCase : Optional[Any] , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(lowerCAmelCase__ , param_name="""crop_size""" )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
_UpperCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Tuple] = None ):
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCAmelCase__ ):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(lowerCAmelCase__ ) ):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowerCAmelCase__ )
_UpperCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
_UpperCAmelCase = logits.argmax(dim=1 )
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
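        # Illustrative note: passing target_sizes=[(height, width)] upsamples each
        # logit map back to its source resolution before the per-pixel argmax, so
        # every returned segmentation map matches its original image size.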
| 289 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer( self ) -> None:
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
    def setUpClass( cls ):
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        cls.pad_token_id = 1
        return cls
def snake_case__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def snake_case__ ( self : str ) -> List[Any]:
'''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
        src_text = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] )
def snake_case__ ( self : int ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def snake_case__ ( self : Any ) -> List[Any]:
'''simple docstring'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
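        # Illustrative note on shift_tokens_right for MBart (not asserted above):
        # labels end with [..., eos, lang_code], and the decoder input rotates the
        # trailing language code to the front, giving [lang_code, ..., eos].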
@require_torch
def snake_case__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='''pt''' )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='''pt''' )
        labels = targets['''input_ids''']
        batch['''decoder_input_ids'''] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 250004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
| 324 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
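    # Note: these integration checks load PyTorch weights via from_pt=True and
    # compare only a 3x3 logits slice at atol=1e-4, which tolerates small
    # framework-level numerical differences between Flax and PyTorch.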
@slow
    def test_inference_no_head( self ):
        model = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 160 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowercase__ : str = logging.get_logger(__name__)
class __lowerCAmelCase ( BaseImageProcessor ):
"""simple docstring"""
_snake_case : Union[str, Any] = ['pixel_values']
def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 256}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase__ ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(lowerCAmelCase__ ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 324 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig ( PretrainedConfig ):
    """Configuration for the Audio Spectrogram Transformer (AST) model."""

    model_type = 'audio-spectrogram-transformer'

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1_024 , num_mel_bins=128 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
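        # Illustrative consequence of these defaults: with 16x16 patches, strides
        # of 10 on both axes, 128 mel bins and max_length 1024, the encoder sees
        # floor((128 - 16) / 10) + 1 = 12 frequency patches and
        # floor((1024 - 16) / 10) + 1 = 101 time patches, i.e. 12 * 101 = 1212.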
| 66 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput ( BaseOutput ):
    """Output of the conditional UNet: the predicted sample tensor."""

    sample : jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
    """A conditional 2D UNet model (Flax) that predicts noise residuals."""
    sample_size : int = 3_2
    in_channels : int = 4
    out_channels : int = 4
    down_block_types : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention : Union[bool, Tuple[bool]] = False
    block_out_channels : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    layers_per_block : int = 2
    attention_head_dim : Union[int, Tuple[int]] = 8
    num_attention_heads : Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim : int = 1_2_8_0
    dropout : float = 0.0
    use_linear_projection : bool = False
    dtype : jnp.dtype = jnp.float32
    flip_sin_to_cos : bool = True
    freq_shift : int = 0
    use_memory_efficient_attention : bool = False
    def init_weights( self , rng : jax.random.KeyArray ) -> FrozenDict:
        # Initialize parameters by tracing the model on dummy inputs.
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup( self ) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(block_out_channels ) - 1 )]
            is_final_block = i == len(block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        """simple docstring"""
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train)
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))
        if not return_dict:
            return (sample,)
        return FlaxUNet2DConditionOutput(sample=sample)
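# --- Hedged sketch (added; not part of the original file): the timestep coercion at
# the top of __call__, isolated so it can be exercised without building the model.
# Only jax is assumed to be installed.
import jax.numpy as jnp  # noqa: E402

def _demo_normalize_timesteps(timesteps):
    # Python scalars become a 1-element int32 array; 0-d arrays get a batch axis.
    if not isinstance(timesteps, jnp.ndarray):
        timesteps = jnp.array([timesteps], dtype=jnp.int32)
    elif len(timesteps.shape) == 0:
        timesteps = jnp.expand_dims(timesteps.astype(jnp.float32), 0)
    return timesteps  # always shape (batch,) afterwards for scalar-like inputs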
| 324 | 0 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_004)
    def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_5 )
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase : Union[str, Any] = {'''input_ids''': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        expected_encoding = UpperCAmelCase  # readable alias for the integration fixture bound above
        sequences = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, )
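# --- Hedged usage note (added; not part of the original module): these cases follow
# standard unittest/pytest discovery, so a single test can be run with e.g.
#   pytest tests/models/camembert/test_tokenization_camembert.py -k "full_tokenizers"
# (the file path is an assumption about where this module lives in the repo).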
| 336 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """simple docstring"""
    path = os.path.join(output_dir, f"""{split}_results.json""")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"""can't find {path}""")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    """simple docstring"""
    def test_run_glue(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
    def test_run_clm(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
    def test_run_summarization(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
    def test_run_mlm(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
    def test_run_t5_mlm(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
    def test_run_ner(self):
        """simple docstring"""
        # with so little data, distributed training needs more epochs to reach the same score
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
    def test_run_qa(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
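# --- Hedged sketch (added; not part of the original tests): the sys.argv-patching
# pattern used above, reduced to a standalone example. `_demo_main` is a
# hypothetical stand-in for the example scripts' main() entry points.
def _demo_main():
    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument("--output_dir")
    return demo_parser.parse_args().output_dir

def _demo_run_with_argv():
    demo_args = "demo.py --output_dir /tmp/out".split()
    # argparse reads sys.argv[1:], so the script under test needs no changes
    with patch.object(sys, "argv", demo_args):
        return _demo_main()  # -> "/tmp/out"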
| 324 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
'''simple docstring'''
print("Making key files..." )
make_key_files("rsa" , 1_0_2_4 )
print("Key files generation successful." )
def generate_key(key_size: int):
    """simple docstring"""
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q
    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break
    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int):
'''simple docstring'''
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
    public_key, private_key = generate_key(key_size)
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
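# --- Hedged sketch (added; not part of the original module): how the (n, e)/(n, d)
# pairs written by make_key_files() are used. Constants are the classic textbook
# example (p=61, q=53) and are far too small for real keys.
def demo_rsa_roundtrip():
    n, e, d = 3_2_3_3, 1_7, 2_7_5_3  # d = e^-1 mod (p - 1) * (q - 1)
    message = 6_5
    ciphertext = pow(message, e, n)  # encrypt with the public key (n, e)
    assert pow(ciphertext, d, n) == message  # decrypt with the private key (n, d)
    return ciphertext  # == 2790 for these constants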
| 108 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results
def is_cuda_and_apex_available():
    """simple docstring"""
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    """simple docstring"""
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
    def tearDownClass(cls):
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_glue_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_clm_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_mlm_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_ner_no_trainer(self):
        """simple docstring"""
        # with so little data, distributed training needs more epochs to reach the same score
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_squad_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        """simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer(self):
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
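# --- Hedged sketch (added; not part of the original tests): the flat-JSON contract
# that get_results() assumes — each example script leaves an all_results.json in its
# --output_dir with one key per logged metric.
def _demo_results_contract():
    demo_dir = tempfile.mkdtemp()
    with open(os.path.join(demo_dir, "all_results.json"), "w") as f:
        json.dump({"eval_accuracy": 0.8, "train_loss": 0.42}, f)
    return get_results(demo_dir)["eval_accuracy"]  # -> 0.8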
| 324 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Tuple = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"
    def __init__(self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs):
        kwargs.pop("text_config_dict", None)
        kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)
    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
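# --- Hedged usage sketch (added; not part of the original file): composing the three
# configs. The keyword values shown are just the defaults above, so this mirrors
# BridgeTowerConfig() with no arguments.
def _demo_bridgetower_config():
    text = BridgeTowerTextConfig(vocab_size=50265, hidden_size=768)
    vision = BridgeTowerVisionConfig(hidden_size=768, patch_size=16)
    config = BridgeTowerConfig.from_text_vision_configs(text, vision)
    return config.to_dict()["text_config"]["vocab_size"]  # -> 50265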
| 44 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """simple docstring"""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """simple docstring"""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    """simple docstring"""
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    """simple docstring"""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    """simple docstring"""
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
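# --- Hedged round-trip check (added; not part of the original module). Note the
# classic Playfair caveats encoded above: J is absent from the 5x5 table, doubled
# letters get an "X" inserted, and odd-length input gets a trailing "X".
if __name__ == "__main__":
    demo_key = "monarchy"
    secret = encode("hide the gold", demo_key)
    assert decode(secret, demo_key) == prepare_input("hide the gold")  # "HIDETHEGOLDX"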
| 324 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
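# --- Hedged usage note (added; not part of the original __init__): with the
# _LazyModule registered above, importing the package is cheap and the torch-backed
# module only materializes on first attribute access, e.g. (assuming this file lives
# at transformers/models/mra/__init__.py):
#     from transformers.models import mra
#     mra.MraConfig   # resolves via _import_structure, imports configuration_mra
#     mra.MraModel    # first touch triggers the modeling_mra import
# When torch is missing, the OptionalDependencyNotAvailable branch simply leaves the
# modeling entries out of _import_structure.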
| 185 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 40_96,
'google/bigbird-roberta-large': 40_96,
'google/bigbird-base-trivia-itc': 40_96,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return self.sp_model.get_piece_size()
    def get_vocab(self) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        """simple docstring"""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        """simple docstring"""
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
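# --- Hedged summary (added; not part of the original module) of the special-token
# layouts produced by the three methods above:
#
#   single sequence: [CLS] A [SEP]           -> token_type_ids: all 0
#   pair:            [CLS] A [SEP] B [SEP]   -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"
#
# and get_special_tokens_mask marks exactly the added [CLS]/[SEP] positions with 1.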
| 324 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args, **kwargs):
    """simple docstring"""
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args, **kwargs):
    """simple docstring"""
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__ )
def model(*args, **kwargs):
    """simple docstring"""
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args, **kwargs):
    """simple docstring"""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args, **kwargs):
    """simple docstring"""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
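# --- Hedged usage sketch (added; not part of the original file): these wrappers
# follow the torch.hub entry-point convention (a `dependencies` list plus top-level
# callables), so the intended consumer is torch.hub rather than a direct import:
#     import torch
#     tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
#     net = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")
# (repo string assumed; any extra from_pretrained kwargs pass straight through).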
| 127 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "audio-spectrogram-transformer"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-1_2, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
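# --- Hedged sketch (added; not part of the original file): how the strides above
# translate into a patch count, mirroring the frequency/time unfolding the AST
# embedding layer performs over a (num_mel_bins x max_length) spectrogram.
def _demo_ast_num_patches(config=None):
    cfg = config if config is not None else ASTConfig()
    frequency_out = (cfg.num_mel_bins - cfg.patch_size) // cfg.frequency_stride + 1  # (128-16)//10+1 = 12
    time_out = (cfg.max_length - cfg.patch_size) // cfg.time_stride + 1  # (1024-16)//10+1 = 101
    return frequency_out * time_out  # 1212 patches with the defaults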
| 324 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'camembert-base': 5_12,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
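# --- Hedged summary (added; not part of the original file): CamemBERT is
# RoBERTa-derived, so the methods above encode a pair with a *double* separator and
# all-zero token_type_ids:
#
#   single sequence: <s> A </s>
#   pair:            <s> A </s></s> B </s>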
| 77 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 20_48, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """simple docstring"""
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward(self, input: Tensor) -> Tensor:
        """simple docstring"""
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """simple docstring"""
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels
    def forward(self, pixel_values: Tensor) -> Tensor:
        """simple docstring"""
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """simple docstring"""
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
    def forward(self, input: Tensor) -> Tensor:
        """simple docstring"""
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu") -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
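
# Illustrative sketch (editor's addition): the residual layer only inserts a
# projection shortcut when the shape changes. The channel/stride choices below
# are assumptions, not values from the original file.
def _demo_basic_layer_shortcut() -> None:
    import torch

    identity_case = ResNetBasicLayer(64, 64, stride=1)     # shortcut is nn.Identity()
    projection_case = ResNetBasicLayer(64, 128, stride=2)  # shortcut is a 1x1 ResNetShortCut
    x = torch.randn(1, 64, 56, 56)
    with torch.no_grad():
        print(identity_case(x).shape)    # torch.Size([1, 64, 56, 56])
        print(projection_case(x).shape)  # torch.Size([1, 128, 28, 28])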
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer composed of three convolutions. The first 1x1 convolution reduces
    the channels by `reduction` (4x by default) and the last 1x1 convolution restores them.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
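
# Illustrative sketch (editor's addition): the inner 3x3 convolution of the
# bottleneck runs at out_channels // reduction channels. Numbers are assumptions.
def _demo_bottleneck_layer() -> None:
    import torch

    layer = ResNetBottleNeckLayer(64, 256, stride=1)  # inner 3x3 conv runs at 256 // 4 = 64 channels
    with torch.no_grad():
        print(layer(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 256, 56, 56])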
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ) -> None:
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading
    pretrained models.
    """

    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module) -> None:
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='''fan_out''', nonlinearity='''relu''')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.',
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='''vision''',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
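
# Illustrative usage sketch (editor's addition). Loading real weights needs network
# access; the checkpoint name mirrors _CHECKPOINT_FOR_DOC above, and the random
# uint8 array stands in for a real PIL image.
def _demo_resnet_model() -> None:
    import torch
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50')
    model = ResNetModel.from_pretrained('microsoft/resnet-50')
    image = torch.randint(0, 256, (224, 224, 3), dtype=torch.uint8).numpy()
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7]), i.e. _EXPECTED_OUTPUT_SHAPE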
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type from `num_labels` and the label dtype if it was not set explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
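
# Illustrative usage sketch (editor's addition): end-to-end classification with the
# head defined above. The microsoft/resnet-50 checkpoint ships ImageNet labels via
# config.id2label; the random array again stands in for a real image.
def _demo_resnet_classification() -> None:
    import torch
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50')
    model = ResNetForImageClassification.from_pretrained('microsoft/resnet-50')
    image = torch.randint(0, 256, (224, 224, 3), dtype=torch.uint8).numpy()
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])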
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ',
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
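
# Illustrative usage sketch (editor's addition): the backbone exposes per-stage
# feature maps selected via `out_features`. The "stem"/"stageN" names below are
# assumptions based on the usual backbone naming convention.
def _demo_resnet_backbone() -> None:
    import torch

    config = ResNetConfig(out_features=['stage2', 'stage4'])
    backbone = ResNetBackbone(config)
    with torch.no_grad():
        outputs = backbone(torch.randn(1, 3, 224, 224))
    for feature_map in outputs.feature_maps:
        print(feature_map.shape)  # one tensor per requested stage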
| 324 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = 'xmod'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 16 |
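# Illustrative usage sketch (editor's addition): instantiating the configuration
# defined above. The overridden values are arbitrary assumptions.
def _demo_xmod_config() -> None:
    config = XmodConfig(adapter_reduction_factor=2, default_language="en_XX")
    print(config.model_type)                # "xmod"
    print(config.languages)                 # ["en_XX"]
    print(config.adapter_reduction_factor)  # 2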
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    """Return `x` unchanged if it is already iterable, otherwise duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
"""simple docstring"""
def snake_case__ ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ) -> List[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Tuple ) -> int:
'''simple docstring'''
pass
def snake_case__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case__ ( self : int , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float ) -> str:
'''simple docstring'''
_UpperCamelCase = np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
_UpperCamelCase = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
_UpperCamelCase = after_output[0]
_UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1e-3 )
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCamelCase = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
_UpperCamelCase = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCamelCase = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
_UpperCamelCase = inputs_dict
_UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_UpperCamelCase = pt_model(**lowerCAmelCase__ ).to_tuple()
_UpperCamelCase = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
_UpperCamelCase = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
_UpperCamelCase = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4e-2 )
def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Any:
'''simple docstring'''
_UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
_UpperCamelCase = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCamelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def snake_case__ ( self : int ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase = config_inputs_dict.pop('''vision_config''' )
_UpperCamelCase = config_inputs_dict.pop('''text_config''' )
_UpperCamelCase = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_pretrained_model_and_inputs()
_UpperCamelCase = model_a(**lowerCAmelCase__ )
_UpperCamelCase = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = model_a(**lowerCAmelCase__ )
_UpperCamelCase = after_outputs[0]
_UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1e-5 )
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
"""simple docstring"""
def snake_case__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
_UpperCamelCase = 13
_UpperCamelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_UpperCamelCase = random_attention_mask([batch_size, 4] )
_UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaxViTModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def snake_case__ ( self : str ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = FlaxViTModelTester(self )
_UpperCamelCase = FlaxBertModelTester(self )
_UpperCamelCase = vit_model_tester.prepare_config_and_inputs()
_UpperCamelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase = vision_config_and_inputs
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
_UpperCamelCase = 13
_UpperCamelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_UpperCamelCase = random_attention_mask([batch_size, 4] )
_UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FlaxCLIPVisionModel(lowerCAmelCase__ )
_UpperCamelCase = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def snake_case__ ( self : List[str] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = FlaxCLIPVisionModelTester(self )
_UpperCamelCase = FlaxBertModelTester(self )
_UpperCamelCase = clip_model_tester.prepare_config_and_inputs()
_UpperCamelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase = vision_config_and_inputs
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCamelCase = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''np''' )
_UpperCamelCase = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCamelCase = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 ) )
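
# Illustrative sketch (editor's addition): turning contrastive logits like the ones
# checked above into caption probabilities. Standalone, with toy numbers rather
# than real model outputs.
def _demo_logits_to_probs() -> None:
    import jax

    logits_per_image = jax.numpy.array([[1.2284727, 0.3104122]])  # one image vs. two captions
    probs = jax.nn.softmax(logits_per_image, axis=-1)
    print(probs)  # higher mass on the first (matching) caption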
| 324 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (a sympy expression in `variable`) with the (modified) Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
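
# Editor's addition: a dependency-free cross-check of the same iteration, using a
# hand-coded derivative instead of sympy. Names and defaults are assumptions.
def _newton_raphson_plain(f, df, x0: float, precision: float = 1e-10) -> float:
    prev_guess = x0
    while True:
        derivative = df(prev_guess)
        if derivative == 0:
            raise ZeroDivisionError('Could not find root') from None
        next_guess = prev_guess - f(prev_guess) / derivative
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Example: _newton_raphson_plain(lambda x: x * x - 2, lambda x: 2 * x, 1.0) ~= 1.41421356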
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F'''{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 39 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Tuple=37 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : int=16 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Any=4 , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_attention_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_choices
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_attention_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = FlaxAlbertModelTester(self )
@slow
def snake_case__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''albert-base-v2''' )
_UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase__ )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_UpperCamelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
_UpperCamelCase = (1, 11, 768)
self.assertEqual(output.shape , lowerCAmelCase__ )
_UpperCamelCase = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
| 324 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_re_flax_models = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", identifier)
    return [m.group(0) for m in matches]
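
# Editor's addition: a tiny sanity check of camel_case_split, illustrating how model
# class names are broken into words before prefix matching below.
def _demo_camel_case_split() -> None:
    print(camel_case_split("TFBertForMaskedLM"))  # ['TF', 'Bert', 'For', 'Masked', 'LM']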
def get_frameworks_table():
"""simple docstring"""
_UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCAmelCase = {
config.replace("""Config""" ,"""""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_UpperCAmelCase = collections.defaultdict(lowercase )
_UpperCAmelCase = collections.defaultdict(lowercase )
_UpperCAmelCase = collections.defaultdict(lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowercase ):
_UpperCAmelCase = None
if _re_tf_models.match(lowercase ) is not None:
_UpperCAmelCase = tf_models
_UpperCAmelCase = _re_tf_models.match(lowercase ).groups()[0]
elif _re_flax_models.match(lowercase ) is not None:
_UpperCAmelCase = flax_models
_UpperCAmelCase = _re_flax_models.match(lowercase ).groups()[0]
elif _re_pt_models.match(lowercase ) is not None:
_UpperCAmelCase = pt_models
_UpperCAmelCase = _re_pt_models.match(lowercase ).groups()[0]
if lookup_dict is not None:
while len(lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
_UpperCAmelCase = True
break
# Try again after removing the last word in the name
_UpperCAmelCase = """""".join(camel_case_split(lowercase )[:-1] )
_UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_UpperCAmelCase = list(lowercase )
all_models.sort()
_UpperCAmelCase = {"""model_type""": all_models}
_UpperCAmelCase = [pt_models[t] for t in all_models]
_UpperCAmelCase = [tf_models[t] for t in all_models]
_UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
_UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_UpperCAmelCase = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_UpperCAmelCase = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_UpperCAmelCase = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_UpperCAmelCase = """AutoTokenizer"""
_UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(lowercase )
def update_pipeline_and_auto_class_table(table):
"""simple docstring"""
_UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_UpperCAmelCase = [model_mapping, f'''TF_{model_mapping}''', f'''FLAX_{model_mapping}''']
_UpperCAmelCase = [auto_class, f'''TF_{auto_class}''', f'''Flax_{auto_class}''']
# Loop through all three frameworks
for module, cls, mapping in zip(lowercase ,lowercase ,lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(lowercase ,lowercase ):
continue
# First extract all model_names
_UpperCAmelCase = []
for name in getattr(lowercase ,lowercase ).values():
if isinstance(lowercase ,lowercase ):
model_names.append(lowercase )
else:
model_names.extend(list(lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def update_metadata(token, commit_sha):
"""simple docstring"""
_UpperCAmelCase = get_frameworks_table()
_UpperCAmelCase = Dataset.from_pandas(lowercase )
_UpperCAmelCase = hf_hub_download(
"""huggingface/transformers-metadata""" ,"""pipeline_tags.json""" ,repo_type="""dataset""" ,token=lowercase )
_UpperCAmelCase = Dataset.from_json(lowercase )
_UpperCAmelCase = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(lowercase ) )
}
_UpperCAmelCase = update_pipeline_and_auto_class_table(lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_UpperCAmelCase = sorted(table.keys() )
_UpperCAmelCase = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
_UpperCAmelCase = Dataset.from_pandas(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowercase ,"""frameworks.json""" ) )
tags_dataset.to_json(os.path.join(lowercase ,"""pipeline_tags.json""" ) )
if commit_sha is not None:
_UpperCAmelCase = (
f'''Update with commit {commit_sha}\n\nSee: '''
f'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
)
else:
_UpperCAmelCase = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" ,folder_path=lowercase ,repo_type="""dataset""" ,token=lowercase ,commit_message=lowercase ,)
def check_pipeline_tags():
"""simple docstring"""
_UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
_UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
_UpperCAmelCase = pipeline_tasks[key]["""pt"""]
if isinstance(lowercase ,(list, tuple) ):
_UpperCAmelCase = model[0]
_UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(lowercase )
if len(lowercase ) > 0:
_UpperCAmelCase = """, """.join(lowercase )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
f'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
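
# Editor's addition: example invocations, inferred from the argparse flags above.
#   python utils/update_metadata.py --check-only
#   python utils/update_metadata.py --token <HF_TOKEN> --commit_sha <COMMIT_SHA>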
| 289 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=7 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : Optional[Any]=18 , lowerCAmelCase__ : Union[str, Any]=30 , lowerCAmelCase__ : Any=400 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 18}
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = LevitImageProcessor if is_vision_available() else None
def snake_case__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = LevitImageProcessingTester(self )
@property
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
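
# Editor's addition: an illustrative direct use of LevitImageProcessor outside the
# test harness. The size/crop values mirror the defaults exercised above.
def _demo_levit_processor() -> None:
    import numpy as np
    from transformers import LevitImageProcessor

    processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    image = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])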
| 324 | 0 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'ResNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'microsoft/resnet-50'
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'microsoft/resnet-50'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat'

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'microsoft/resnet-50',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__()
__a : List[Any] = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
__a : List[str] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
__a : Tuple = config.num_channels
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                '''Make sure that the channel dimension of the pixel values matches the one set in the configuration.''' )
__a : Any = self.embedder(lowerCAmelCase__ )
__a : List[Any] = self.pooler(lowerCAmelCase__ )
return embedding
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 ):
super().__init__()
__a : Dict = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ )
__a : Dict = nn.BatchNormad(lowerCAmelCase__ )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : str = self.convolution(lowerCAmelCase__ )
__a : List[str] = self.normalization(lowerCAmelCase__ )
return hidden_state
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" ):
super().__init__()
__a : str = in_channels != out_channels or stride != 1
__a : Optional[int] = (
ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
__a : int = nn.Sequential(
ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , activation=lowerCAmelCase__ ) , )
__a : Tuple = ACTaFN[activation]
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Optional[int] = hidden_state
__a : Tuple = self.layer(lowerCAmelCase__ )
__a : List[str] = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
__a : List[str] = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , _UpperCAmelCase = 4 ):
super().__init__()
__a : Tuple = in_channels != out_channels or stride != 1
__a : str = out_channels // reduction
__a : Optional[Any] = (
ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
__a : str = nn.Sequential(
ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
__a : List[str] = ACTaFN[activation]
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Optional[Any] = hidden_state
__a : Dict = self.layer(lowerCAmelCase__ )
__a : Optional[Any] = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
__a : List[str] = self.activation(lowerCAmelCase__ )
return hidden_state
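# --- Illustrative sketch (not part of the original file) ----------------------
# Both residual layers above compute y = activation(F(x) + shortcut(x)); a
# quick shape check with plain torch (sizes are hypothetical):
def _residual_shape_demo() -> None:
    x = torch.randn(1, 64, 56, 56)  # (batch, channels, height, width)
    f = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False)
    shortcut = nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False)  # 1x1 projection
    y = nn.ReLU()(f(x) + shortcut(x))
    assert y.shape == (1, 128, 28, 28)  # stride 2 halves the spatial size

_residual_shape_demo()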
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , ):
super().__init__()
__a : List[str] = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
__a : int = nn.Sequential(
            # downsampling is done in the first layer with a stride of 2
layer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , activation=config.hidden_act ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Optional[int] = input
for layer in self.layers:
__a : Any = layer(lowerCAmelCase__ )
return hidden_state
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__()
__a : List[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__a : str = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ):
__a : str = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__a : Any = hidden_states + (hidden_state,)
__a : str = stage_module(lowerCAmelCase__ )
if output_hidden_states:
__a : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ResNetConfig
__lowerCAmelCase = 'resnet'
__lowerCAmelCase = 'pixel_values'
__lowerCAmelCase = True
def _lowerCamelCase ( self , _UpperCAmelCase ):
if isinstance(lowerCAmelCase__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a : Dict = value
A = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
A = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _UpperCamelCase , )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__(lowerCAmelCase__ )
__a : List[str] = config
__a : List[str] = ResNetEmbeddings(lowerCAmelCase__ )
__a : Tuple = ResNetEncoder(lowerCAmelCase__ )
__a : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ):
__a : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a : Any = return_dict if return_dict is not None else self.config.use_return_dict
__a : Optional[int] = self.embedder(lowerCAmelCase__ )
__a : Dict = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
__a : Dict = encoder_outputs[0]
__a : Dict = self.pooler(lowerCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ''' , _UpperCamelCase , )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__(lowerCAmelCase__ )
__a : Optional[Any] = config.num_labels
__a : Tuple = ResNetModel(lowerCAmelCase__ )
# classification head
__a : Optional[Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCamelCase ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
__a : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
__a : List[Any] = self.resnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
__a : Dict = outputs.pooler_output if return_dict else outputs[1]
__a : Optional[Any] = self.classifier(lowerCAmelCase__ )
__a : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a : Any = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a : Tuple = '''single_label_classification'''
else:
__a : Union[str, Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
__a : Any = MSELoss()
if self.num_labels == 1:
__a : str = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a : Tuple = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
__a : str = CrossEntropyLoss()
__a : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a : Optional[Any] = BCEWithLogitsLoss()
__a : Optional[Any] = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
__a : Any = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
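# --- Illustrative sketch (not part of the original file) ----------------------
# Hypothetical end-to-end usage, assuming this file mirrors the upstream
# `transformers` ResNet API (checkpoint id and class names are assumptions):
#
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   import torch
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL image
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])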
@add_start_docstrings(
'''\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ''' , _UpperCamelCase , )
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__(lowerCAmelCase__ )
super()._init_backbone(lowerCAmelCase__ )
__a : Any = [config.embedding_size] + config.hidden_sizes
__a : Tuple = ResNetEmbeddings(lowerCAmelCase__ )
__a : str = ResNetEncoder(lowerCAmelCase__ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ):
__a : Any = return_dict if return_dict is not None else self.config.use_return_dict
__a : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a : int = self.embedder(lowerCAmelCase__ )
__a : Optional[Any] = self.encoder(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
__a : int = outputs.hidden_states
__a : Any = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__a : int = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase__ , ) | 160 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowercase__ : Union[str, Any] = HUGGINGFACE_HUB_CACHE
lowercase__ : int = 'config.json'
lowercase__ : Optional[int] = 'diffusion_pytorch_model.bin'
lowercase__ : List[str] = 'diffusion_flax_model.msgpack'
lowercase__ : str = 'model.onnx'
lowercase__ : Optional[int] = 'diffusion_pytorch_model.safetensors'
lowercase__ : List[str] = 'weights.pb'
lowercase__ : str = 'https://huggingface.co'
lowercase__ : str = default_cache_path
lowercase__ : Optional[int] = 'diffusers_modules'
lowercase__ : Optional[int] = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
lowercase__ : Tuple = ['fp16', 'non-ema']
lowercase__ : int = '.self_attn'
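# --- Illustrative sketch (not part of the original file) ----------------------
# How these constants typically compose into an on-disk path; the folder name
# below is a hypothetical example, and `default_cache_path` is the Hub cache
# directory referenced above:
#   example_config_path = os.path.join(default_cache_path, 'models--example--model', 'config.json')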
| 324 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : int = 'open-llama'
def __init__( self: str , snake_case: Dict=100_000 , snake_case: int=4_096 , snake_case: Optional[Any]=11_008 , snake_case: int=32 , snake_case: Dict=32 , snake_case: Optional[Any]="silu" , snake_case: str=2_048 , snake_case: Dict=0.0_2 , snake_case: Any=1E-6 , snake_case: Optional[Any]=True , snake_case: Union[str, Any]=0 , snake_case: Dict=1 , snake_case: int=2 , snake_case: Any=False , snake_case: Optional[int]=True , snake_case: Optional[int]=0.1 , snake_case: Tuple=0.1 , snake_case: List[Any]=True , snake_case: Optional[int]=True , snake_case: Union[str, Any]=None , **snake_case: List[Any] , ) -> Tuple:
snake_case_ :List[Any] = vocab_size
snake_case_ :Dict = max_position_embeddings
snake_case_ :List[Any] = hidden_size
snake_case_ :Optional[Any] = intermediate_size
snake_case_ :Optional[Any] = num_hidden_layers
snake_case_ :Optional[Any] = num_attention_heads
snake_case_ :Optional[int] = hidden_act
snake_case_ :str = initializer_range
snake_case_ :Optional[int] = rms_norm_eps
snake_case_ :Tuple = use_cache
snake_case_ :Optional[Any] = kwargs.pop(
"""use_memorry_efficient_attention""" , lowerCAmelCase__ )
snake_case_ :Any = hidden_dropout_prob
snake_case_ :Dict = attention_dropout_prob
snake_case_ :int = use_stable_embedding
snake_case_ :int = shared_input_output_embedding
snake_case_ :int = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__ , )
def lowerCAmelCase_ ( self: Any ) -> int:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCAmelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
snake_case_ :Tuple = self.rope_scaling.get("""type""" , lowerCAmelCase__ )
snake_case_ :int = self.rope_scaling.get("""factor""" , lowerCAmelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 66 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : str = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def a__ ( lowercase : str ) -> Dict:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCamelCase = k.replace(lowercase, lowercase )
if k.startswith('''encoder''' ):
_UpperCamelCase = k.replace('''.attn''', '''.self_attn''' )
_UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' )
_UpperCamelCase = k.replace('''norm2''', '''final_layer_norm''' )
elif k.startswith('''decoder''' ):
_UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' )
_UpperCamelCase = k.replace('''norm2''', '''encoder_attn_layer_norm''' )
_UpperCamelCase = k.replace('''norm3''', '''final_layer_norm''' )
return k
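# --- Illustrative trace (not part of the original file) ------------------------
# What the renaming above does to two sample ParlAI-style keys:
#   'encoder.attention.q_lin.weight' -> 'encoder.self_attn.q_proj.weight'
#   'decoder.norm2.weight'           -> 'decoder.encoder_attn_layer_norm.weight'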
def a__ ( lowercase : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = [
'''model.encoder.layernorm_embedding.weight''',
'''model.encoder.layernorm_embedding.bias''',
'''model.decoder.layernorm_embedding.weight''',
'''model.decoder.layernorm_embedding.bias''',
]
for k in keys:
_UpperCamelCase = sd.pop(lowercase )
_UpperCamelCase = k.replace('''layernorm_embedding''', '''layer_norm''' )
assert new_k not in sd
_UpperCamelCase = v
lowercase__ : str = ['START']
@torch.no_grad()
def a__ ( lowercase : Optional[int], lowercase : List[str], lowercase : List[str] ) -> Dict:
"""simple docstring"""
_UpperCamelCase = torch.load(lowercase, map_location='''cpu''' )
_UpperCamelCase = model['''model''']
_UpperCamelCase = BlenderbotConfig.from_json_file(lowercase )
_UpperCamelCase = BlenderbotForConditionalGeneration(lowercase )
_UpperCamelCase = m.model.state_dict().keys()
_UpperCamelCase = []
_UpperCamelCase = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCamelCase = rename_state_dict_key(lowercase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCamelCase = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(lowercase )
m.model.load_state_dict(lowercase, strict=lowercase )
m.half()
m.save_pretrained(lowercase )
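# --- Illustrative sketch (not part of the original file) ----------------------
# Hypothetical invocation of this conversion script from the shell (the script
# filename is an assumption):
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json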
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 324 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : Dict, __A : Optional[int]=7, __A : List[Any]=3, __A : Optional[Any]=1_8, __A : Union[str, Any]=3_0, __A : Any=4_0_0, __A : Any=True, __A : Tuple=None, __A : str=True, __A : List[str]=None, __A : Optional[Any]=True, __A : str=[0.5, 0.5, 0.5], __A : int=[0.5, 0.5, 0.5], ):
UpperCAmelCase : List[Any] = size if size is not None else {'''shortest_edge''': 1_8}
UpperCAmelCase : Tuple = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
UpperCAmelCase : Tuple = parent
UpperCAmelCase : Optional[int] = batch_size
UpperCAmelCase : Optional[Any] = num_channels
UpperCAmelCase : Tuple = image_size
UpperCAmelCase : Optional[int] = min_resolution
UpperCAmelCase : Optional[Any] = max_resolution
UpperCAmelCase : str = do_resize
UpperCAmelCase : Optional[Any] = size
UpperCAmelCase : Optional[Any] = do_center_crop
UpperCAmelCase : Tuple = crop_size
UpperCAmelCase : List[Any] = do_normalize
UpperCAmelCase : str = image_mean
UpperCAmelCase : List[Any] = image_std
def __magic_name__ ( self : Union[str, Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
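# --- Illustrative sketch (not part of the original file) ----------------------
# The tests below feed dummy images through the processor; a minimal stand-in
# for `prepare_image_inputs` (hypothetical helper, simplified to numpy arrays):
def _make_dummy_images(batch_size=7, num_channels=3, height=20, width=20):
    return [
        np.random.randint(0, 256, (height, width, num_channels), dtype=np.uint8)
        for _ in range(batch_size)
    ]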
@require_torch
@require_vision
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LevitImageProcessor if is_vision_available() else None
def __magic_name__ ( self : int ):
UpperCAmelCase : int = LevitImageProcessingTester(self )
@property
def __magic_name__ ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__, '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__, '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase__, '''size''' ) )
def __magic_name__ ( self : str ):
UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 1_8} )
self.assertEqual(image_processor.crop_size, {'''height''': 1_8, '''width''': 1_8} )
UpperCAmelCase : int = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size, {'''height''': 8_4, '''width''': 8_4} )
def __magic_name__ ( self : Optional[int] ):
pass
def __magic_name__ ( self : Dict ):
UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, Image.Image )
# Test not batched input
UpperCAmelCase : List[Any] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : int = image_processing(lowerCAmelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, np.ndarray )
# Test not batched input
UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : List[Any] = image_processing(lowerCAmelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, torch.Tensor )
# Test not batched input
UpperCAmelCase : Tuple = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : List[str] = image_processing(lowerCAmelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 336 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : Tuple = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowercase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
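# --- Illustrative note (not part of the original file) -------------------------
# With the `_LazyModule` above, heavy submodules are imported only on first
# attribute access, e.g. (exact package path is an assumption and depends on
# the installed transformers version):
#   from transformers.models.mctct import MCTCTConfig  # triggers the real import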
| 324 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ='bert'
def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : Optional[Any] = hidden_act
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : Tuple = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : int = layer_norm_eps
lowerCAmelCase : int = position_embedding_type
lowerCAmelCase : Union[str, Any] = use_cache
lowerCAmelCase : List[str] = classifier_dropout
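# --- Illustrative sketch (not part of the original file) ----------------------
# Assuming this mirrors the upstream `transformers.BertConfig`, a small model
# could be configured like so (all sizes hypothetical):
#   from transformers import BertConfig
#   tiny = BertConfig(vocab_size=30_522, hidden_size=128, num_hidden_layers=2,
#                     num_attention_heads=2, intermediate_size=512)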
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCAmelCase : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 108 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowercase__ : Any = logging.get_logger(__name__)
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : List[str] = None
@experimental
def a__ ( lowercase : Union[str, Any], lowercase : Optional[int], lowercase : Tuple, lowercase : List[Any], lowercase : Dict, lowercase : Union[str, Any], lowercase : Optional[Any] ) -> int:
"""simple docstring"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
return _map_with_joblib(lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
def a__ ( lowercase : Dict, lowercase : str, lowercase : Union[str, Any], lowercase : Optional[Any], lowercase : Optional[int], lowercase : Optional[Any], lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = num_proc if num_proc <= len(lowercase ) else len(lowercase )
    _UpperCamelCase = [] # We organize the splits ourselves (contiguous splits)
for index in range(lowercase ):
_UpperCamelCase = len(lowercase ) // num_proc
_UpperCamelCase = len(lowercase ) % num_proc
_UpperCamelCase = div * index + min(lowercase, lowercase )
_UpperCamelCase = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowercase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(lowercase )}, """
F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
F"""Spawning {num_proc} processes for {len(lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
_UpperCamelCase , _UpperCamelCase = None, None
if not disable_tqdm:
_UpperCamelCase , _UpperCamelCase = (RLock(),), tqdm.set_lock
with Pool(lowercase, initargs=lowercase, initializer=lowercase ) as pool:
_UpperCamelCase = pool.map(lowercase, lowercase )
logger.info(F"""Finished {num_proc} processes""" )
_UpperCamelCase = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(lowercase )} objects""" )
return mapped
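# --- Illustrative sketch (not part of the original file) ----------------------
# The split arithmetic above hands out contiguous, near-equal slices; for
# 10 items over 3 processes the (start, end) pairs are (0, 4), (4, 7), (7, 10):
def _contiguous_splits(n_items, num_proc):
    div, mod = divmod(n_items, num_proc)
    return [(div * i + min(i, mod), div * (i + 1) + min(i + 1, mod)) for i in range(num_proc)]

assert _contiguous_splits(10, 3) == [(0, 4), (4, 7), (7, 10)]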
def a__ ( lowercase : str, lowercase : Tuple, lowercase : List[str], lowercase : List[str], lowercase : Any, lowercase : int, lowercase : Optional[Any] ) -> Any:
"""simple docstring"""
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=lowercase ):
return joblib.Parallel()(
joblib.delayed(lowercase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def a__ ( lowercase : str ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
_UpperCamelCase = None
| 324 | 0 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
_a : int = 6_378_137.0
_a : Optional[Any] = 6_356_752.314_245
_a : Any = 6_378_137
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
_lowerCAmelCase : List[Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_lowerCAmelCase : Union[str, Any] = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
_lowerCAmelCase : Union[str, Any] = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_lowerCAmelCase : int = haversine_distance(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_lowerCAmelCase : List[Any] = (b_lata + b_lata) / 2
_lowerCAmelCase : List[str] = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
_lowerCAmelCase : List[Any] = (sin(_lowerCamelCase ) ** 2) * (cos(_lowerCamelCase ) ** 2)
_lowerCAmelCase : Optional[Any] = cos(sigma / 2 ) ** 2
_lowerCAmelCase : Dict = (sigma - sin(_lowerCamelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
_lowerCAmelCase : Optional[Any] = (cos(_lowerCamelCase ) ** 2) * (sin(_lowerCamelCase ) ** 2)
_lowerCAmelCase : str = sin(sigma / 2 ) ** 2
_lowerCAmelCase : Any = (sigma + sin(_lowerCamelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
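# --- Illustrative check (not part of the original file) ------------------------
# The flattening used above is f = (a - b) / a over the WGS-84 semi-axes; as a
# sanity check, its reciprocal is close to the well-known value 298.257:
assert abs(1 / ((6_378_137.0 - 6_356_752.314_245) / 6_378_137.0) - 298.257) < 1e-2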
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Optional[Any]=30 , lowerCAmelCase__ : Dict=400 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]=1 / 255 , lowerCAmelCase__ : Tuple=True , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_pad
def snake_case__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case__ ( self : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False ) -> str:
'''simple docstring'''
if not batched:
_UpperCamelCase = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
_UpperCamelCase , _UpperCamelCase = image.size
else:
_UpperCamelCase , _UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCamelCase = int(self.size['''shortest_edge'''] * h / w )
_UpperCamelCase = self.size['''shortest_edge''']
elif w > h:
_UpperCamelCase = self.size['''shortest_edge''']
_UpperCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_UpperCamelCase = self.size['''shortest_edge''']
_UpperCamelCase = self.size['''shortest_edge''']
else:
_UpperCamelCase = []
for image in image_inputs:
_UpperCamelCase , _UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
_UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
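# --- Illustrative sketch (not part of the original file) ----------------------
# The expected-size logic above implements shortest-edge resizing; with
# shortest_edge=18, a 30 x 40 (h x w) image maps to 18 x 24:
def _shortest_edge_resize(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert _shortest_edge_resize(30, 40) == (18, 24)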
@require_torch
@require_vision
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None
def snake_case__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = DeformableDetrImageProcessingTester(self )
@property
def snake_case__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_rescale''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_pad''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
_UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def snake_case__ ( self : Tuple ) -> Any:
'''simple docstring'''
pass
def snake_case__ ( self : int ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self : str ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''image_id''': 39769, '''annotations''': target}
# encode them
_UpperCamelCase = DeformableDetrImageProcessor()
_UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
# verify area
_UpperCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) )
# verify class_labels
_UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) )
@slow
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
_UpperCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_UpperCamelCase = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
# verify area
_UpperCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) )
# verify class_labels
_UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) )
# verify masks
_UpperCamelCase = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowerCAmelCase__ )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) )
| 324 | 0 |
'''simple docstring'''
from torch import nn
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
super().__init__()
__lowerCamelCase : Union[str, Any] = class_size
__lowerCamelCase : Union[str, Any] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__lowerCamelCase : int = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : str = self.mlp(lowerCAmelCase__ )
return logits
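# --- Illustrative sketch (not part of the original file) ----------------------
# The head above is a single linear map from embeddings to class logits; a
# clean equivalent with explicit names (sizes are hypothetical):
import torch  # only needed for the dummy input below

_head = nn.Linear(768, 5)             # embed_size -> class_size
_logits = _head(torch.randn(2, 768))  # (batch, embed_size) -> (batch, class_size)
assert _logits.shape == (2, 5)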
| 185 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase__ : str = None
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
lowercase__ : int = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
lowercase__ : Optional[int] = {
'google/rembert': 2_56,
}
lowercase__ : str = '▁'
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : str = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Dict = RemBertTokenizer
def __init__( self : List[Any] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : List[Any]="[CLS]" , lowerCAmelCase__ : str="[SEP]" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : Optional[int]="[SEP]" , lowerCAmelCase__ : List[str]="<pad>" , lowerCAmelCase__ : str="[CLS]" , lowerCAmelCase__ : List[Any]="[MASK]" , **lowerCAmelCase__ : List[Any] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCamelCase = do_lower_case
_UpperCamelCase = remove_space
_UpperCamelCase = keep_accents
_UpperCamelCase = vocab_file
_UpperCamelCase = False if not self.vocab_file else True
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__ ( self : int , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCAmelCase__ ) )
return
_UpperCamelCase = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
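# --- Illustrative note (not part of the original file) -------------------------
# The methods above implement the standard RemBERT input layouts:
#   single sequence: [CLS] A [SEP]
#   sequence pair:   [CLS] A [SEP] B [SEP]
# with token_type_ids of 0 over the first segment and 1 over the second.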
| 324 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 127 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Any = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    '''Configuration class to store the configuration of a Deformable DETR model.'''

    model_type = 'deformable_detr'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function='relu',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type='sine',
        backbone='resnet50',
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        '''Serialize this instance to a Python dictionary.'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
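# Construction sketch (illustrative values): __init__ above enforces the
# two_stage -> with_box_refine dependency, and attribute_map exposes the
# standard attribute names.
#
#     config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#     assert config.num_attention_heads == config.encoder_attention_heads
#     DeformableDetrConfig(two_stage=True)  # raises ValueError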
| 324 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
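    # Worked example (illustrative numbers): for a 30 x 40 (w x h) image with
    # size={'shortest_edge': 18}, w < h, so expected_width = 18 and
    # expected_height = int(18 * 40 / 30) = 24 -- the short side is pinned to 18
    # and the aspect ratio is preserved.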
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
lowercase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ , lowercase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
lowercase__ : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
lowercase__ : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : List[str] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : Any = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
lowercase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowercase__ : List[Any] = json.loads(f.read() )
lowercase__ : Tuple = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
lowercase__ : str = DeformableDetrImageProcessor()
lowercase__ : int = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='pt' )
# verify pixel values
lowercase__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase__ )
lowercase__ : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
# verify area
lowercase__ : Optional[int] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase__ ) )
# verify boxes
lowercase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase__ )
lowercase__ : Dict = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase__ , atol=1e-3 ) )
# verify image_id
lowercase__ : Tuple = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase__ ) )
# verify is_crowd
lowercase__ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase__ ) )
# verify class_labels
lowercase__ : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase__ ) )
# verify orig_size
lowercase__ : Tuple = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase__ ) )
# verify size
lowercase__ : Any = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase__ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
lowercase__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowercase__ : List[Any] = json.loads(f.read() )
lowercase__ : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
lowercase__ : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowercase__ : Optional[Any] = DeformableDetrImageProcessor(format='coco_panoptic' )
lowercase__ : List[str] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors='pt' )
# verify pixel values
lowercase__ : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase__ )
lowercase__ : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
# verify area
lowercase__ : Tuple = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase__ ) )
# verify boxes
lowercase__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase__ )
lowercase__ : Optional[int] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase__ , atol=1e-3 ) )
# verify image_id
lowercase__ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase__ ) )
# verify is_crowd
lowercase__ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase__ ) )
# verify class_labels
lowercase__ : str = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase__ ) )
# verify masks
lowercase__ : List[str] = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowerCAmelCase__ )
# verify orig_size
lowercase__ : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase__ ) )
# verify size
lowercase__ : Optional[int] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase__ ) )
| 77 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Return the most likely shift, its chi-squared statistic and the decoded text."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            'a': 0.08497,
            'b': 0.01492,
            'c': 0.02202,
            'd': 0.04253,
            'e': 0.11162,
            'f': 0.02228,
            'g': 0.02015,
            'h': 0.06094,
            'i': 0.07546,
            'j': 0.00153,
            'k': 0.01292,
            'l': 0.04025,
            'm': 0.02406,
            'n': 0.06749,
            'o': 0.07507,
            'p': 0.01929,
            'q': 0.00095,
            'r': 0.07587,
            's': 0.06327,
            't': 0.09356,
            'u': 0.02758,
            'v': 0.00978,
            'w': 0.02560,
            'x': 0.00150,
            'y': 0.01994,
            'z': 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key)
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
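# Usage sketch ('ifmmp' is 'hello' Caesar-shifted by one place):
#     shift, chi_squared, decoded = decrypt_caesar_with_chi_squared('ifmmp')
# The shift whose decryption best matches English letter frequencies (smallest
# chi-squared statistic) is returned along with the decoded text.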
| 324 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 16 |
'''Introspective sort (introsort): quicksort with heapsort and insertion-sort fallbacks.'''
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
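# A quick sanity sketch (not from the original file): introsort quicksorts with
# a median-of-3 pivot, falls back to heap_sort once the 2 * ceil(log2(n)) depth
# budget is spent, and finishes ranges below the size threshold (16) with
# insertion_sort.
#
#     >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#     [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]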
| 324 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
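# Worked example (illustrative numbers): 1 mol of an ideal gas at 300 K in a
# 0.025 m^3 vessel exerts P = nRT / V = 8.314462 * 300 / 0.025 = 99773.544 Pa,
# just under one atmosphere:
#     pressure_of_gas_system(1, 300, 0.025)  # -> 99773.544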
if __name__ == "__main__":
from doctest import testmod
testmod()
| 39 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    '''Remove duplicated initializer tensors and save an optimized copy of the model.'''
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
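# Usage sketch (the path is hypothetical): deduplicate shared weight tensors in
# an exported ONNX graph and write an 'optimized_*' copy next to the input file.
#     optimized_path = remove_dup_initializers('/path/to/exported/model.onnx')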
| 324 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audioa = np.zeros((34000,))
        audio = np.zeros((14000,))
        return audio_classifier, [audioa, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audioa, audio = examples
        outputs = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            outputs, [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ], )
        outputs = audio_classifier(audio, top_k=1)
        self.assertEqual(
            outputs, [
                {"score": ANY(float), "label": ANY(str)},
            ], )
        self.run_torchaudio(audio_classifier)
@require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        outputs = audio_classifier(audio)
        self.assertEqual(
            outputs, [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ], )
@require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)
        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3), [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ], )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
    def test_small_model_tf(self):
        pass
| 289 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer(self):
'''simple docstring'''
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_XX'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
    def test_enro_tokenizer_batch_encode_plus(self):
'''simple docstring'''
_UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
    def test_enro_tokenizer_decode_ignores_language_codes(self):
'''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
_UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
    def test_enro_tokenizer_truncation(self):
'''simple docstring'''
_UpperCamelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
_UpperCamelCase = 10
_UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
    def test_mask_token(self):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] )
    def test_special_tokens_unaffected_by_save_load(self):
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
    def test_batch_fairseq_parity(self):
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
'''simple docstring'''
_UpperCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def test_seq2seq_max_length(self):
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
_UpperCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
_UpperCamelCase = targets['''input_ids''']
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation(self):
'''simple docstring'''
_UpperCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 250004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
| 324 | 0 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be the same.')
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight, i.e.
            # weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
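    # Worked example (classic fractional-knapsack numbers, illustrative only):
    # with profits [60, 100, 120], weights [10, 20, 30] and max_weight 50, items
    # 1 and 2 are taken whole plus 20/30 of item 3:
    #     calc_profit([60, 100, 120], [10, 20, 30], 50)  # -> 240.0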
| 325 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"""=> File names {file_names}""")
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
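    # Hypothetical invocation (flag names as defined by the parser above; the
    # script file name is assumed):
    #     python convert_slow_tokenizers_checkpoints_to_fast.py \
    #         --tokenizer_name BertTokenizer --dump_path /tmp/fast_tokenizers
    # Omitting --tokenizer_name converts every tokenizer in TOKENIZER_CLASSES.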
| 325 | 1 |