"""simple docstring"""
from math import factorial
A__ : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def _snake_case ( lowerCamelCase__ : List[Any] ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError("Parameter number must be int" )
if number < 0:
raise ValueError("Parameter number must be greater than or equal to 0" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(lowercase_ ) )
def _snake_case ( lowerCamelCase__ : str = 60 , lowerCamelCase__ : Tuple = 1_000_000 ) -> int:
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(lowercase_ , lowercase_ ):
raise TypeError("Parameters chain_length and number_limit must be int" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"Parameters chain_length and number_limit must be greater than 0" )
# the counter for the chains with the exact desired length
lowerCamelCase_ : Any =0
# the cached sizes of the previous chains
lowerCamelCase_ : Dict ={}
for start_chain_element in range(1 , lowercase_ ):
# The temporary set will contain the elements of the chain
lowerCamelCase_ : Any =set()
lowerCamelCase_ : Optional[Any] =0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
lowerCamelCase_ : List[str] =start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(lowercase_ )
chain_set_length += 1
lowerCamelCase_ : Union[str, Any] =digit_factorial_sum(lowercase_ )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCamelCase_ : List[Any] =chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution()}')
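
A quick hand-checkable illustration of the chain arithmetic above (not part of the original file): 169 sits on the classic factorial-digit loop, so its chain contains exactly three distinct elements before repeating.

# Worked example: 169 -> 363601 -> 1454 -> 169
assert digit_factorial_sum(169) == 363601    # 1 + 720 + 362880
assert digit_factorial_sum(363601) == 1454   # 6 + 720 + 6 + 720 + 1 + 1
assert digit_factorial_sum(1454) == 169      # 1 + 24 + 120 + 24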
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer
    into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """
        Processes audio and text input, as well as audio and text targets.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """
        Collates the audio and text inputs, as well as their targets, into a padded batch.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily swap in num_mel_bins so the feature extractor pads
                # the log-mel spectrogram targets with the right feature size.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's ``batch_decode``.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's ``decode``.
        """
        return self.tokenizer.decode(*args, **kwargs)
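
A minimal usage sketch of the processor above (illustrative, not from the original file; assumes the "microsoft/speecht5_tts" checkpoint and requires network access to download it):

from transformers import SpeechT5Processor
import numpy as np

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")

# Inference direction: tokenize the input text.
inputs = processor(text="Hello world", return_tensors="pt")

# Training direction: also pass a target waveform; per `__call__` above this
# attaches `labels` (log-mel targets) and, when present, `decoder_attention_mask`.
waveform = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
batch = processor(
    text="Hello world", audio_target=waveform, sampling_rate=16000, return_tensors="pt"
)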
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element by scanning the rest
    of the array: O(n^2) time.

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Like next_greatest_element_slow() but uses enumerate() and slicing
    instead of explicit indexing.

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Get the NGE for each element in a single right-to-left pass using a
    monotonic stack: O(n) time, O(n) extra space.

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # Pop everything that is not greater than the current element.
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
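
A hand-checkable trace of the stack-based variant (illustrative, not from the original file):

# For [2, 1, 5], scanning right to left: 5 gets -1 (empty stack), 1 sees 5 on
# the stack, and 2 pops the smaller 1 before also seeing 5.
assert next_greatest_element([2, 1, 5]) == [5, 5, -1]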
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
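
A short construction sketch for the configs above (illustrative only; nested vision settings are forwarded to GitVisionConfig):

config = GitConfig(vision_config={"image_size": 224, "patch_size": 16})
assert config.vision_config.image_size == 224
assert config.to_dict()["model_type"] == "git"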
# Test script for `accelerate`'s gradient synchronization and gradient
# accumulation behavior.
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """
    Return the citation count for a publication looked up on Google Scholar.
    NOTE: this scrapes Google Scholar's HTML and will break if the page
    markup changes.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[int]:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def _UpperCAmelCase ( self ) -> Any:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> List[str]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
_a = LlamaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
_a = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
_a = True
_a = LlamaModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
_a = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
_a = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any:
_a = LlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any:
_a = True
_a = True
_a = LlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
_a = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
_a = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
A_ : Tuple = (LlamaForCausalLM,) if is_torch_available() else ()
A_ : Any = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Optional[Any] = False
A_ : str = False
def _UpperCAmelCase ( self ) -> int:
_a = LlamaModelTester(self )
_a = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(UpperCAmelCase__ )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCAmelCase ( self ) -> List[str]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''single_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(UpperCAmelCase__ )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCAmelCase ( self ) -> Tuple:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''multi_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(UpperCAmelCase__ )
_a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def _UpperCAmelCase ( self ) -> int:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ids_tensor([1, 10] , config.vocab_size )
_a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = LlamaModel(UpperCAmelCase__ )
original_model.to(UpperCAmelCase__ )
original_model.eval()
_a = original_model(UpperCAmelCase__ ).last_hidden_state
_a = original_model(UpperCAmelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = {'''type''': scaling_type, '''factor''': 10.0}
_a = LlamaModel(UpperCAmelCase__ )
scaled_model.to(UpperCAmelCase__ )
scaled_model.eval()
_a = scaled_model(UpperCAmelCase__ ).last_hidden_state
_a = scaled_model(UpperCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_a = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
_a = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_a = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_a = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def _UpperCAmelCase ( self ) -> str:
_a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_a = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
_a = model(torch.tensor(UpperCAmelCase__ ) )
# Expected mean on dim = -1
_a = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_a = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def _UpperCAmelCase ( self ) -> Any:
_a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_a = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
_a = model(torch.tensor(UpperCAmelCase__ ) )
# Expected mean on dim = -1
_a = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_a = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def _UpperCAmelCase ( self ) -> Dict:
_a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_a = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
_a = model(torch.tensor(UpperCAmelCase__ ) )
_a = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
# fmt: off
_a = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Model is curently gated''' )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
_a = '''Simply put, the theory of relativity states that '''
_a = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
_a = tokenizer.encode(UpperCAmelCase__ , return_tensors='''pt''' )
_a = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCAmelCase__ )
# greedy generation outputs
_a = model.generate(UpperCAmelCase__ , max_new_tokens=64 , top_p=UpperCAmelCase__ , temperature=1 , do_sample=UpperCAmelCase__ )
_a = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) | 320 |
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    """Merge LoRA weights from a .safetensors checkpoint into a diffusers pipeline."""
    # load the base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer by walking the attribute path piece by piece
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
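
The merge rule the loop above implements is W <- W + alpha * (up @ down); a tiny shape-level sketch with random matrices (illustrative only, not part of the original script):

import torch

w = torch.zeros(4, 4)
up, down = torch.randn(4, 2), torch.randn(2, 4)  # rank-2 LoRA factors
w += 0.75 * torch.mm(up, down)                   # alpha = 0.75
assert w.shape == (4, 4)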
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """
    A string can be rearranged into a palindrome if at most one character
    occurs an odd number of times (ignoring spaces and case).

    >>> can_string_be_rearranged_as_palindrome_counter("Momo")
    True
    >>> can_string_be_rearranged_as_palindrome_counter("Mother")
    False
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """
    Explicit-count version of the check above.

    >>> can_string_be_rearranged_as_palindrome("Momo")
    True
    >>> can_string_be_rearranged_as_palindrome("Mother")
    False
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """
    Benchmark code comparing the two implementations.
    """
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    # Imports guarded by try/except should not be reported as hard dependencies.
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if num <= 0:
raise ValueError("Input must be a positive integer" )
__lowerCAmelCase = [True] * (num + 1)
__lowerCAmelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , lowercase_ ):
__lowerCAmelCase = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
A : int = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
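
A quick hand-checkable illustration of the sieve's output (not part of the original file):

assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]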
def nor_gate(input_1: int, input_2: int) -> int:
    """
    A NOR gate outputs 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 0)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ : Any = 16
A_ : Any = 32
def A ( snake_case__ , snake_case__ = 16 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase_ , max_length=lowercase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ = datasets.map(
lowercase_ , batched=lowercase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ = 8
else:
SCREAMING_SNAKE_CASE__ = None
return tokenizer.pad(
lowercase_ , padding="""longest""" , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ : Union[str, Any] = mocked_dataloaders # noqa: F811
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase_ ) == "1":
SCREAMING_SNAKE_CASE__ = 2
# Initialize accelerator
SCREAMING_SNAKE_CASE__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ = config["""lr"""]
SCREAMING_SNAKE_CASE__ = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowercase_ )
def inner_training_loop(snake_case__ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=lowercase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_dataloaders(lowercase_ , lowercase_ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=1_00 , num_training_steps=(len(lowercase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Now we train the model
for epoch in range(lowercase_ ):
model.train()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ = model(**lowercase_ )
SCREAMING_SNAKE_CASE__ = outputs.loss
accelerator.backward(lowercase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**lowercase_ )
SCREAMING_SNAKE_CASE__ = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase_ , references=lowercase_ , )
SCREAMING_SNAKE_CASE__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase_ , default=lowercase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
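# A minimal, self-contained sketch of the retry-on-OOM pattern described in the
# "New Code" comments above: `find_executable_batch_size` reruns the decorated
# function with a halved batch size whenever it raises an out-of-memory error.
# The toy function below is illustrative and not part of this script.
from accelerate.utils import find_executable_batch_size as _find_batch_size

@_find_batch_size(starting_batch_size=128)
def _toy_training_loop(batch_size):
    # On OOM, the decorator frees memory and retries with batch_size // 2.
    print(f"trying batch_size={batch_size}")

if __name__ == "__main__":
    _toy_training_loop()  # called with no arguments; the decorator injects batch_size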
| 165 |
import os
import sys
import unittest
_lowerCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCamelCase : Any = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
_lowerCamelCase : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
'''simple docstring'''
A__ = get_test_to_tester_mapping(UpperCAmelCase__)
A__ = get_test_to_tester_mapping(UpperCAmelCase__)
A__ = {'''BertModelTest''': '''BertModelTester'''}
A__ = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
'''simple docstring'''
A__ = get_model_to_test_mapping(UpperCAmelCase__)
A__ = get_model_to_test_mapping(UpperCAmelCase__)
A__ = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
A__ = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
'''simple docstring'''
A__ = get_model_to_tester_mapping(UpperCAmelCase__)
A__ = get_model_to_tester_mapping(UpperCAmelCase__)
A__ = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
A__ = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
| 14 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class _UpperCamelCase ( UpperCAmelCase__ ):
def __init__( self :Optional[Any] , *lowerCamelCase :List[str] , **lowerCamelCase :Dict ) -> Dict:
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
self.check_model_type(UpperCAmelCase__ )
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :int=None , lowerCamelCase :Optional[int]=None , lowerCamelCase :int=None , **lowerCamelCase :Any ) -> str:
UpperCAmelCase__ , UpperCAmelCase__ = {}, {}
if padding is not None:
UpperCAmelCase__ = padding
if truncation is not None:
UpperCAmelCase__ = truncation
if top_k is not None:
UpperCAmelCase__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self :Dict , lowerCamelCase :Union["Image.Image", str] , lowerCamelCase :str = None , **lowerCamelCase :List[Any] ) -> List[Any]:
if isinstance(UpperCAmelCase__ , (Image.Image, str) ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCAmelCase__ = {"image": image, "question": question}
else:
UpperCAmelCase__ = image
UpperCAmelCase__ = super().__call__(UpperCAmelCase__ , **UpperCAmelCase__ )
return results
def UpperCAmelCase_ ( self :List[Any] , lowerCamelCase :Dict , lowerCamelCase :Union[str, Any]=False , lowerCamelCase :Tuple=False ) -> Optional[Any]:
UpperCAmelCase__ = load_image(inputs["image"] )
UpperCAmelCase__ = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ )
UpperCAmelCase__ = self.image_processor(images=UpperCAmelCase__ , return_tensors=self.framework )
model_inputs.update(UpperCAmelCase__ )
return model_inputs
def UpperCAmelCase_ ( self :List[Any] , lowerCamelCase :List[Any] ) -> str:
UpperCAmelCase__ = self.model(**UpperCAmelCase__ )
return model_outputs
def UpperCAmelCase_ ( self :Union[str, Any] , lowerCamelCase :Dict , lowerCamelCase :Optional[int]=5 ) -> List[Any]:
if top_k > self.model.config.num_labels:
UpperCAmelCase__ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase__ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(UpperCAmelCase__ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
UpperCAmelCase__ = scores.tolist()
UpperCAmelCase__ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
| 169 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = encoder_stride
A__ = num_attention_outputs
A__ = embed_dim
A__ = embed_dim + 1
A__ = resolution
A__ = depths
A__ = hidden_sizes
A__ = dim
A__ = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict:
'''simple docstring'''
A__ = TFEfficientFormerModel(config=UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
A__ = 1
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
'''simple docstring'''
A__ = TFEfficientFormerModelTester(self)
A__ = ConfigTester(
self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict):
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
if hasattr(self.model_tester , '''encoder_seq_length'''):
A__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
A__ = seq_length * self.model_tester.chunk_length
else:
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
A__ = outputs.decoder_hidden_states
                self.assertIsInstance(UpperCAmelCase__ , (list, tuple))
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int:
'''simple docstring'''
A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def SCREAMING_SNAKE_CASE ( self : str) ->str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
A__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
A__ = model_class(UpperCAmelCase__)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
A__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
A__ = model(UpperCAmelCase__)
self.assertTrue(outputs_dict is not None)
def prepare_img( ) -> Any:
"""simple docstring"""
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.0555, 0.4825, -0.0852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.1312, 0.4353, -1.0499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
| 14 | 0 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
class lowerCAmelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_=-1 ) -> Tuple:
__lowerCAmelCase = label_idx
def A__ ( self , snake_case_ , snake_case_ ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__lowerCAmelCase = mode.value
__lowerCAmelCase = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
__lowerCAmelCase = 1
__lowerCAmelCase = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
__lowerCAmelCase = []
__lowerCAmelCase = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
__lowerCAmelCase = []
__lowerCAmelCase = []
else:
__lowerCAmelCase = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
__lowerCAmelCase = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__lowerCAmelCase = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for \'%s\'.""" , line.split()[0] )
def A__ ( self , snake_case_ ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
__lowerCAmelCase = f.read().splitlines()
if "O" not in labels:
__lowerCAmelCase = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCAmelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self ) -> int:
super().__init__(label_idx=-2 )
def A__ ( self , snake_case_ ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
__lowerCAmelCase = f.read().splitlines()
if "O" not in labels:
__lowerCAmelCase = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowerCAmelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def A__ ( self , snake_case_ , snake_case_ ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__lowerCAmelCase = mode.value
__lowerCAmelCase = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
__lowerCAmelCase = 1
__lowerCAmelCase = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCAmelCase__ ):
__lowerCAmelCase = []
__lowerCAmelCase = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
return examples
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
__lowerCAmelCase = 0
for sentence in parse_incr(UpperCAmelCase__ ):
__lowerCAmelCase = preds_list[example_id]
__lowerCAmelCase = """"""
for token in sentence:
out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCAmelCase__ )
example_id += 1
def A__ ( self , snake_case_ ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
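# For reference, a sketch of the whitespace-separated CoNLL layout the NER task
# above parses: token first, its label in column `label_idx` (-1 for NER, -2 for
# chunking), with blank lines and "-DOCSTART-" delimiting documents. The sample
# below is illustrative.
_CONLL_SAMPLE = """-DOCSTART- -X- -X- O

EU NNP B-NP B-ORG
rejects VBZ B-VP O
German JJ B-NP B-MISC
"""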
| 301 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( value , weight , capacity ) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value = 0
    fractions = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
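# Worked check for the greedy rule above: value/weight ratios are 6, 5 and 4,
# so items 0 and 1 are taken whole and 20/30 of item 2 (80 of its 120 value):
assert SCREAMING_SNAKE_CASE([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 2 / 3])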
| 14 | 0 |
import functools
def lowerCamelCase__ ( days : list , costs : list ) -> int:
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days ) >= 366:
        raise ValueError("""All days elements should be less than 366""" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
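# Worked check for the memoized recursion above: with travel days
# [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] (1-, 7- and 30-day passes),
# the cheapest plan is a 1-day pass on day 1, a 7-day pass covering days 4-8,
# and a 1-day pass on day 20, i.e. 2 + 7 + 2 = 11:
assert lowerCamelCase__([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11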
| 122 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results( result , args ) -> Optional[Any]:
"""simple docstring"""
A__ = args.log_outputs
A__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
A__ = load_metric('''wer''' )
A__ = load_metric('''cer''' )
# compute metrics
A__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
A__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
A__ = f"""WER: {wer_result}\nCER: {cer_result}"""
print(lowercase_ )
with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowercase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
A__ = f"""log_{dataset_id}_predictions.txt"""
A__ = f"""log_{dataset_id}_targets.txt"""
with open(lowercase_ , '''w''' ) as p, open(lowercase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowercase_ , lowercase_ ):
p.write(f"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowercase_ , with_indices=lowercase_ )
def normalize_text( text ) -> str:
"""simple docstring"""
A__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
A__ = re.sub(lowercase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
A__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
A__ = ''' '''.join(text.split(lowercase_ ) )
return text
def main( args ) -> List[str]:
"""simple docstring"""
A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
A__ = AutoFeatureExtractor.from_pretrained(args.model_id )
A__ = feature_extractor.sampling_rate
# resample audio
A__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowercase_ ) )
# load eval pipeline
if args.device is None:
A__ = 0 if torch.cuda.is_available() else -1
A__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowercase_ ):
A__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
A__ = prediction['''text''']
A__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
A__ = dataset.map(lowercase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowercase_ , lowercase_ )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
_lowerCamelCase : str = parser.parse_args()
main(args)
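# Quick illustration of `normalize_text` above: lower-case the transcript,
# strip the listed punctuation, then collapse newlines and repeated spaces,
# e.g. "Hello, World!\n\nSecond  line." -> "hello world second line".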
| 14 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( string_a : str , string_b : str ):
    '''simple docstring'''
    if len(string_a ) != len(string_b ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
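# Worked example for the Hamming distance above: "karolin" and "kathrin"
# differ at the r/t, o/h and l/r positions, so the distance is 3.
assert _SCREAMING_SNAKE_CASE("karolin", "kathrin") == 3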
| 220 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : int = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
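# The `_LazyModule` assignment above defers the heavy torch/TF imports until an
# attribute is first accessed. A rough sketch of the same idea with PEP 562's
# module-level __getattr__, shown commented out for illustration only (this is
# not the library's actual mechanism):
#
# def __getattr__(name):
#     if name == "BlipModel":
#         from .modeling_blip import BlipModel  # imported only on first access
#         return BlipModel
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")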
| 14 | 0 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( UpperCAmelCase__ ):
_UpperCAmelCase :Dict = (DDIMParallelScheduler,)
_UpperCAmelCase :Tuple = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase__ ( self : str , **snake_case__ : str ):
lowerCamelCase_ : Any ={
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( self : int , **snake_case__ : List[Any] ):
lowerCamelCase_ : Tuple =self.scheduler_classes[0]
lowerCamelCase_ : Optional[int] =self.get_scheduler_config(**UpperCAmelCase__ )
lowerCamelCase_ : int =scheduler_class(**UpperCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =10, 0.0
lowerCamelCase_ : List[str] =self.dummy_model()
lowerCamelCase_ : Dict =self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__ )
for t in scheduler.timesteps:
lowerCamelCase_ : str =model(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase_ : Optional[int] =scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase__ )
lowerCamelCase_ : int =self.scheduler_classes[0]
lowerCamelCase_ : List[Any] =self.get_scheduler_config(steps_offset=1 )
lowerCamelCase_ : Union[str, Any] =scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCAmelCase__ ( self : str ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Tuple ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : List[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Tuple ):
self.check_over_configs(thresholding=UpperCAmelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , sample_max_value=UpperCAmelCase__ , )
def UpperCAmelCase__ ( self : Tuple ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Tuple ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=UpperCAmelCase__ , eta=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Optional[Any] =self.scheduler_classes[0]
lowerCamelCase_ : Union[str, Any] =self.get_scheduler_config()
lowerCamelCase_ : Optional[Any] =scheduler_class(**UpperCAmelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : str =self.scheduler_classes[0]
lowerCamelCase_ : Optional[Any] =self.get_scheduler_config()
lowerCamelCase_ : List[str] =scheduler_class(**UpperCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ : Tuple =10, 0.0
scheduler.set_timesteps(UpperCAmelCase__ )
lowerCamelCase_ : List[str] =self.dummy_model()
lowerCamelCase_ : List[str] =self.dummy_sample_deter
lowerCamelCase_ : List[Any] =self.dummy_sample_deter + 0.1
lowerCamelCase_ : str =self.dummy_sample_deter - 0.1
lowerCamelCase_ : int =samplea.shape[0]
lowerCamelCase_ : Union[str, Any] =torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCamelCase_ : List[str] =torch.arange(UpperCAmelCase__ )[0:3, None].repeat(1 , UpperCAmelCase__ )
lowerCamelCase_ : Dict =model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCamelCase_ : Any =scheduler.batch_step_no_noise(UpperCAmelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCAmelCase__ )
lowerCamelCase_ : Any =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCamelCase_ : Optional[Any] =torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1E-2
assert abs(result_mean.item() - 0.4_982 ) < 1E-3
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : List[str] =self.full_loop()
lowerCamelCase_ : str =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCamelCase_ : Optional[int] =torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1E-2
assert abs(result_mean.item() - 0.223_967 ) < 1E-3
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Any =self.full_loop(prediction_type="v_prediction" )
lowerCamelCase_ : Any =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCamelCase_ : Tuple =torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1E-2
assert abs(result_mean.item() - 0.0_684 ) < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : List[str] =self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01 )
lowerCamelCase_ : Any =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCamelCase_ : int =torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1E-2
assert abs(result_mean.item() - 0.1_951 ) < 1E-3
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : List[str] =self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01 )
lowerCamelCase_ : Optional[Any] =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCamelCase_ : List[Any] =torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1E-2
assert abs(result_mean.item() - 0.1_941 ) < 1E-3
| 144 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Tuple = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 146 |
def catalan_numbers( upper_limit ) -> "list[int]":
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_lowerCamelCase : List[Any] = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
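# Cross-check for the DP above: the closed form C(n) = comb(2n, n) // (n + 1)
# must agree with the recurrence (assumes Python >= 3.8 for math.comb).
import math

assert catalan_numbers(5) == [math.comb(2 * n, n) // (n + 1) for n in range(6)]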
| 14 | 0 |
'''simple docstring'''
def get_data( source_data ):
    """simple docstring"""
    data_lists = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
return data_lists
def calculate_each_score( data_lists , weights ):
    """simple docstring"""
    score_lists = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f'Invalid weight of {weight:f} provided'
            raise ValueError(msg )
        score_lists.append(score )
return score_lists
def generate_final_scores( score_lists ):
    """simple docstring"""
    final_scores = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
return final_scores
def procentual_proximity( source_data , weights ):
    """simple docstring"""
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
return source_data
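# Worked check (scores rounded for display): weights [0, 0, 1] mark columns 0-1
# as "lower is better" and column 2 as "higher is better", so the first row
# ranks best overall.
_scored = procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1])
assert [round(row[-1], 2) for row in _scored] == [2.0, 1.0, 1.33]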
| 254 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ) -> Dict:
"""simple docstring"""
A__ = args.pruning_method
A__ = args.threshold
A__ = args.model_name_or_path.rstrip('''/''' )
A__ = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
A__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = TopKBinarizer.apply(lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ , A__ = -0.1, 1.1
A__ = torch.sigmoid(lowercase_ )
A__ = s * (r - l) + l
A__ = s_bar.clamp(min=0.0 , max=1.0 )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
A__ = os.path.join(
os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" )
if not os.path.isdir(lowercase_ ):
shutil.copytree(lowercase_ , lowercase_ )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
_lowerCamelCase : int = parser.parse_args()
main(args)
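# A minimal sketch of the "topK" branch above in plain PyTorch: keep the weights
# whose learned importance scores fall in the top `threshold` fraction and zero
# out the rest. Names here are illustrative; the real TopKBinarizer comes from
# the movement-pruning (emmental) package imported at the top of this script.
def _topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    k = max(1, int(threshold * scores.numel()))
    mask = torch.zeros_like(scores)
    mask.view(-1)[scores.view(-1).topk(k).indices] = 1.0
    return mask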
| 14 | 0 |
"""simple docstring"""
def naive_pattern_search( s , pattern ):
    """simple docstring"""
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
    print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 320 |
_lowerCamelCase : Optional[int] = 65521
def SCREAMING_SNAKE_CASE ( plain_text ) -> int:
    """simple docstring"""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
return (b << 16) | a
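# Worked check against zlib's reference implementation: for "Wikipedia" the
# loop ends with a = 920 and b = 4582, so (4582 << 16) | 920 = 300286872.
import zlib

assert SCREAMING_SNAKE_CASE("Wikipedia") == zlib.adler32(b"Wikipedia") == 300286872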
| 14 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( UpperCAmelCase__ ):
__lowerCAmelCase : List[str] = (DDPMScheduler,)
def __lowerCamelCase ( self :Optional[int] ,**__lowercase :Any ):
snake_case__ : Optional[int] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase__ )
return config
def __lowerCamelCase ( self :Optional[int] ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def __lowerCamelCase ( self :Dict ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase__ ,beta_end=UpperCAmelCase__ )
def __lowerCamelCase ( self :List[Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def __lowerCamelCase ( self :str ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase__ )
def __lowerCamelCase ( self :List[str] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase__ )
def __lowerCamelCase ( self :List[str] ):
self.check_over_configs(thresholding=UpperCAmelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase__ ,prediction_type=UpperCAmelCase__ ,sample_max_value=UpperCAmelCase__ ,)
def __lowerCamelCase ( self :Dict ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def __lowerCamelCase ( self :List[Any] ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=UpperCAmelCase__ )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : str = self.scheduler_classes[0]
snake_case__ : Union[str, Any] = self.get_scheduler_config()
snake_case__ : int = scheduler_class(**UpperCAmelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Tuple = self.get_scheduler_config()
snake_case__ : str = scheduler_class(**UpperCAmelCase__ )
snake_case__ : int = len(UpperCAmelCase__ )
snake_case__ : List[str] = self.dummy_model()
snake_case__ : int = self.dummy_sample_deter
snake_case__ : List[str] = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase__ ) ):
# 1. predict noise residual
snake_case__ : Union[str, Any] = model(UpperCAmelCase__ ,UpperCAmelCase__ )
# 2. predict previous mean of sample x_t-1
snake_case__ : List[Any] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,generator=UpperCAmelCase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case__ : List[Any] = pred_prev_sample
snake_case__ : Dict = torch.sum(torch.abs(UpperCAmelCase__ ) )
snake_case__ : Dict = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : List[str] = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**UpperCAmelCase__ )
snake_case__ : Any = len(UpperCAmelCase__ )
snake_case__ : int = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter
snake_case__ : Tuple = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase__ ) ):
# 1. predict noise residual
snake_case__ : Optional[Any] = model(UpperCAmelCase__ ,UpperCAmelCase__ )
# 2. predict previous mean of sample x_t-1
snake_case__ : List[str] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,generator=UpperCAmelCase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case__ : Tuple = pred_prev_sample
snake_case__ : Tuple = torch.sum(torch.abs(UpperCAmelCase__ ) )
snake_case__ : str = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Any = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config()
snake_case__ : str = scheduler_class(**UpperCAmelCase__ )
snake_case__ : List[str] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
snake_case__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase__ ):
if i == len(UpperCAmelCase__ ) - 1:
snake_case__ : Dict = -1
else:
snake_case__ : int = timesteps[i + 1]
snake_case__ : Any = scheduler.previous_timestep(UpperCAmelCase__ )
snake_case__ : List[Any] = prev_t.item()
self.assertEqual(UpperCAmelCase__ ,UpperCAmelCase__ )
def __lowerCamelCase ( self :str ):
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Dict = self.get_scheduler_config()
snake_case__ : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
snake_case__ : Optional[Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(UpperCAmelCase__ ,msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : int = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config()
snake_case__ : Optional[int] = scheduler_class(**UpperCAmelCase__ )
snake_case__ : str = [1_0_0, 8_7, 5_0, 1, 0]
snake_case__ : Union[str, Any] = len(UpperCAmelCase__ )
with self.assertRaises(UpperCAmelCase__ ,msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ ,timesteps=UpperCAmelCase__ )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Optional[int] = self.scheduler_classes[0]
snake_case__ : Union[str, Any] = self.get_scheduler_config()
snake_case__ : Dict = scheduler_class(**UpperCAmelCase__ )
snake_case__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase__ ,msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' ,):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
| 230 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : str = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : Any = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
_lowerCamelCase : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
_lowerCamelCase : Tuple = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
_lowerCamelCase : Optional[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[int] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = DPRContextEncoderTokenizer
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = DPRQuestionEncoderTokenizer
_lowerCamelCase : int = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCamelCase : Dict = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ :
'''simple docstring'''
def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
elif titles is None or texts is None:
A__ = titles if texts is None else texts
return super().__call__(
UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles]
A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts]
A__ = len(UpperCAmelCase__)
A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages
assert len(UpperCAmelCase__) == len(
UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts."""
A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__)
]
}
if return_attention_mask is not False:
A__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
A__ = attention_mask
return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = reader_input['''input_ids''']
A__ , A__ , A__ = reader_output[:3]
A__ = len(UpperCAmelCase__)
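        # rank passages from most to least relevant according to the relevance logits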
A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__)
A__ = []
for doc_id in sorted_docs:
A__ = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A__ = sequence_ids.index(self.pad_token_id)
else:
A__ = len(UpperCAmelCase__)
A__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(UpperCAmelCase__) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = []
for start_index, start_score in enumerate(UpperCAmelCase__):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__)
A__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
A__ = end_index - start_index + 1
assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(UpperCAmelCase__) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = DPRReaderTokenizer
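# Minimal usage sketch (added for illustration, not part of the original module);
# it assumes this class is exported as `DPRReaderTokenizerFast` and that the
# checkpoint below is reachable:
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="Who wrote Hamlet?",
#       titles="Hamlet",
#       texts="Hamlet is a tragedy written by William Shakespeare.",
#       return_tensors="pt",
#   )
#   # encoded["input_ids"] has shape (n_passages, sequence_length), laid out as
#   # [CLS] <question ids> [SEP] <title ids> [SEP] <text ids>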
| 14 | 0 |
"""simple docstring"""
from math import ceil
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = list(range(0 , lowercase_ ) )
__lowerCAmelCase = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__lowerCAmelCase = []
for i in device_map_blocks:
if device_map_blocks.count(lowercase_ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(lowercase_ )
# Missing blocks
__lowerCAmelCase = [i for i in blocks if i not in device_map_blocks]
__lowerCAmelCase = [i for i in device_map_blocks if i not in blocks]
if len(lowercase_ ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(lowercase_ ) )
if len(lowercase_ ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(lowercase_ ) )
if len(lowercase_ ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(lowercase_ ) )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = list(range(lowercase_ ) )
__lowerCAmelCase = int(ceil(n_layers / len(lowercase_ ) ) )
__lowerCAmelCase = [layers[i : i + n_blocks] for i in range(0 , lowercase_ , lowercase_ )]
return dict(zip(lowercase_ , lowercase_ ) )
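# Worked example (added for clarity): splitting n_layers=12 across devices [0, 1]
# with the helper above gives ceil(12 / 2) = 6 layers per block, i.e.
#   {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}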
| 57 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''encoder-decoder'''
UpperCAmelCase__ = True
def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
A__ = kwargs.pop('''encoder''')
A__ = encoder_config.pop('''model_type''')
A__ = kwargs.pop('''decoder''')
A__ = decoder_config.pop('''model_type''')
from ..auto.configuration_auto import AutoConfig
A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = True
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
A__ = True
A__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.encoder.to_dict()
A__ = self.decoder.to_dict()
A__ = self.__class__.model_type
return output
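# Minimal usage sketch (added for illustration, not part of the original file);
# it assumes this class is exported as `EncoderDecoderConfig`:
#
#   from transformers import BertConfig, EncoderDecoderConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention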
| 14 | 0 |
"""simple docstring"""
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
SCREAMING_SNAKE_CASE__ = mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ )
else:
SCREAMING_SNAKE_CASE__ = max(
mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) , mf_knapsack(i - 1 , lowercase_ , lowercase_ , j - wt[i - 1] ) + val[i - 1] , )
SCREAMING_SNAKE_CASE__ = val
return f[i][j]
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
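    # Bottom-up 0/1 knapsack: dp[i][w_] is the best value achievable with the
    # first i items under capacity w_; each item is either skipped or taken once.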
SCREAMING_SNAKE_CASE__ = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
SCREAMING_SNAKE_CASE__ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
SCREAMING_SNAKE_CASE__ = dp[i - 1][w_]
    return dp[n][w], dp  # index with w directly instead of the leaked loop variable (also safe when w == 0)
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if not (isinstance(lowercase_ , (list, tuple) ) and isinstance(lowercase_ , (list, tuple) )):
raise ValueError(
"""Both the weights and values vectors must be either lists or tuples""" )
SCREAMING_SNAKE_CASE__ = len(lowercase_ )
if num_items != len(lowercase_ ):
SCREAMING_SNAKE_CASE__ = (
"""The number of weights must be the same as the number of values.\n"""
f"""But got {num_items} weights and {len(lowercase_ )} values"""
)
raise ValueError(lowercase_ )
for i in range(lowercase_ ):
if not isinstance(wt[i] , lowercase_ ):
SCREAMING_SNAKE_CASE__ = (
"""All weights must be integers but got weight of """
f"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(lowercase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = knapsack(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE__ = set()
_construct_solution(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
return optimal_val, example_optional_set
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(lowercase_ , lowercase_ , i - 1 , lowercase_ , lowercase_ )
else:
optimal_set.add(lowercase_ )
_construct_solution(lowercase_ , lowercase_ , i - 1 , j - wt[i - 1] , lowercase_ )
if __name__ == "__main__":
A_ : str = [3, 2, 4, 4]
A_ : Tuple = [4, 3, 2, 3]
A_ : int = 4
A_ : Any = 6
A_ : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
A_ : str = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
A_ : str = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 165 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
"""simple docstring"""
A__ = [0] * len(lowercase_ )
A__ = []
A__ = [1] * len(lowercase_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowercase_ ) ):
if indegree[i] == 0:
queue.append(lowercase_ )
while queue:
A__ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
A__ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowercase_ )
print(max(lowercase_ ) )
# Adjacency list of Graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 14 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_lowerCAmelCase : str = logging.get_logger(__name__)
class _UpperCamelCase ( UpperCAmelCase__ ):
def __init__( self :int , *lowerCamelCase :Dict , **lowerCamelCase :List[Any] ) -> None:
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 169 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ ( datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase__ = None
UpperCAmelCase__ = "utf-8"
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = True # deprecated
UpperCAmelCase__ = None # deprecated
UpperCAmelCase__ = 10 << 20 # 10MB
UpperCAmelCase__ = None
class UpperCamelCase_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCAmelCase__ = JsonConfig
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
A__ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
A__ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(UpperCAmelCase__ , (str, list, tuple)):
A__ = data_files
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
A__ = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files}))
return splits
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type
A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema)
return pa_table
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
# We keep only the field we are interested in
A__ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase__ , (list, tuple)):
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
else:
A__ = dataset
A__ = pa.Table.from_pydict(UpperCAmelCase__)
yield file_idx, self._cast_table(UpperCAmelCase__)
# If the file has one json object per line
else:
with open(UpperCAmelCase__ , '''rb''') as f:
A__ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ = max(self.config.chunksize // 32 , 16 << 10)
A__ = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A__ = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase__)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''')
try:
while True:
try:
A__ = paj.read_json(
io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase__ , pa.ArrowInvalid)
and "straddling" not in str(UpperCAmelCase__)
or block_size > len(UpperCAmelCase__)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON
try:
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
A__ = pa.Table.from_pydict(UpperCAmelCase__)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
yield file_idx, self._cast_table(UpperCAmelCase__)
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__)
batch_idx += 1
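# Minimal usage sketch (added for illustration, not part of the original module):
# this builder backs the packaged "json" loader in `datasets`, e.g.
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl")               # one JSON object per line
#   ds = load_dataset("json", data_files="data.json", field="data")  # records nested under a field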
| 14 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class lowerCAmelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
_snake_case = '''swinv2'''
_snake_case = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case_=224 , snake_case_=4 , snake_case_=3 , snake_case_=96 , snake_case_=[2, 2, 6, 2] , snake_case_=[3, 6, 12, 24] , snake_case_=7 , snake_case_=4.0 , snake_case_=True , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_="gelu" , snake_case_=False , snake_case_=0.02 , snake_case_=1e-5 , snake_case_=32 , **snake_case_ , ) -> Any:
super().__init__(**UpperCAmelCase__ )
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = len(UpperCAmelCase__ )
__lowerCAmelCase = num_heads
__lowerCAmelCase = window_size
__lowerCAmelCase = mlp_ratio
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = hidden_act
__lowerCAmelCase = use_absolute_embeddings
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase__ ) - 1) )
__lowerCAmelCase = (0, 0, 0, 0)
| 301 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
return AutoConfig.from_pretrained(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase__):
create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
| 14 | 0 |
from __future__ import annotations
from fractions import Fraction
def lowerCamelCase__ ( a__ : int , a__ : List[str] ) -> bool:
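    # A fraction is "digit cancelling" when striking out the shared digit keeps
    # its value, e.g. 49/98 -> 4/8 == 49/98 (Project Euler problem 33).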
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def lowerCamelCase__ ( a__ : Union[str, Any] ) -> list[str]:
UpperCamelCase_ = []
UpperCamelCase_ = 11
UpperCamelCase_ = int("""1""" + """0""" * digit_len )
for num in range(lowercase_ , lowercase_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowercase_ , lowercase_ ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
UpperCamelCase_ = 10
return solutions
def lowerCamelCase__ ( a__ : int = 2 ) -> int:
UpperCamelCase_ = 1.0
for fraction in fraction_list(lowercase_ ):
UpperCamelCase_ = Fraction(lowercase_ )
result *= frac.denominator / frac.numerator
return int(lowercase_ )
if __name__ == "__main__":
print(solution())
| 122 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str:
'''simple docstring'''
A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]:
'''simple docstring'''
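        # replicate the processor's shortest-edge resize rule so the test can
        # predict the expected output height and width (single image or batched)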
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size['''shortest_edge'''] * h / w)
A__ = self.size['''shortest_edge''']
elif w > h:
A__ = self.size['''shortest_edge''']
A__ = int(self.size['''shortest_edge'''] * w / h)
else:
A__ = self.size['''shortest_edge''']
A__ = self.size['''shortest_edge''']
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0]
A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = DeformableDetrImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''size'''))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''image_id''': 39_769, '''annotations''': target}
# encode them
A__ = DeformableDetrImageProcessor()
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
A__ = DeformableDetrImageProcessor(format='''coco_panoptic''')
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify masks
A__ = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__)
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
| 14 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_UpperCamelCase : str = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_UpperCamelCase : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : List[str] ):
'''simple docstring'''
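    # vectorized Euclidean (L2) distance between two points, computed with NumPy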
return np.sqrt(np.sum((np.asarray(lowercase_ ) - np.asarray(lowercase_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : List[str] ):
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(lowercase_ , lowercase_ ) ) ** (1 / 2)
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) )
benchmark()
| 220 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_lowerCamelCase : str = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_lowerCamelCase : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(lowercase_ ) - np.asarray(lowercase_ )) ** 2 ) )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(lowercase_ , lowercase_ ) ) ** (1 / 2)
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
benchmark()
| 14 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase__ :
def __init__( self : List[str] , snake_case__ : Optional[int] , snake_case__ : List[str]=13 , snake_case__ : List[Any]=64 , snake_case__ : Optional[int]=2 , snake_case__ : List[str]=3 , snake_case__ : int=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : Any=5 , snake_case__ : List[str]=4 , snake_case__ : Any=37 , snake_case__ : Dict="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=10 , snake_case__ : Tuple=0.02 , snake_case__ : Tuple=[1, 16, 4, 4] , snake_case__ : Any=None , ):
lowerCamelCase_ : Optional[Any] =parent
lowerCamelCase_ : Dict =batch_size
lowerCamelCase_ : str =image_size
lowerCamelCase_ : Tuple =patch_size
lowerCamelCase_ : Any =num_channels
lowerCamelCase_ : str =is_training
lowerCamelCase_ : List[str] =use_labels
lowerCamelCase_ : Union[str, Any] =hidden_size
lowerCamelCase_ : Optional[Any] =num_hidden_layers
lowerCamelCase_ : Dict =num_attention_heads
lowerCamelCase_ : List[Any] =intermediate_size
lowerCamelCase_ : str =hidden_act
lowerCamelCase_ : List[Any] =hidden_dropout_prob
lowerCamelCase_ : int =attention_probs_dropout_prob
lowerCamelCase_ : Union[str, Any] =type_sequence_label_size
lowerCamelCase_ : Optional[Any] =initializer_range
lowerCamelCase_ : List[Any] =scope
lowerCamelCase_ : Any =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCamelCase_ : Tuple =(self.image_size // 32) ** 2
lowerCamelCase_ : Dict =num_patches + 1
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ : str =None
if self.use_labels:
lowerCamelCase_ : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : Optional[int] =self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : List[Any] ={
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCAmelCase__ , )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : int ):
lowerCamelCase_ : str =ViTHybridModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase_ : Any =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ):
lowerCamelCase_ : Tuple =self.type_sequence_label_size
lowerCamelCase_ : Any =ViTHybridForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase_ : Optional[Any] =model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Optional[int] =self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =config_and_inputs
lowerCamelCase_ : str ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
_UpperCAmelCase :Optional[int] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
_UpperCAmelCase :int = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase :str = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :str = False
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[int] =ViTHybridModelTester(self )
lowerCamelCase_ : Any =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : List[Any] =model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ : Optional[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ , lowerCamelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict =model_class(UpperCAmelCase__ )
lowerCamelCase_ : Union[str, Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Tuple =[*signature.parameters.keys()]
lowerCamelCase_ : Union[str, Any] =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ , lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : int =_config_zero_init(UpperCAmelCase__ )
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict =model_class(config=UpperCAmelCase__ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCamelCase_ : int =[F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : str =ViTHybridModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def _snake_case ( ) -> List[str]:
lowerCamelCase_ : int =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : List[str] ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Optional[Any] =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCAmelCase__ )
lowerCamelCase_ : Union[str, Any] =self.default_image_processor
lowerCamelCase_ : Union[str, Any] =prepare_img()
lowerCamelCase_ : List[str] =image_processor(images=UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model(**UpperCAmelCase__ )
# verify the logits
lowerCamelCase_ : Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowerCamelCase_ : Dict =torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
@require_accelerate
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : List[Any] =ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
lowerCamelCase_ : Any =ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
lowerCamelCase_ : Dict =prepare_img()
lowerCamelCase_ : Union[str, Any] =image_processor(images=UpperCAmelCase__ , return_tensors="pt" )
lowerCamelCase_ : Tuple =model(**UpperCAmelCase__ )
lowerCamelCase_ : Tuple =outputs.logits
# model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat" )
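# A minimal standalone sketch of the same inference path the integration tests
# above exercise (requires network access; the local image path "cats.jpg" is
# an illustrative assumption, not taken from the tests):
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor
    processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
    model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
    image = Image.open("cats.jpg")  # hypothetical local image
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # the model predicts one of the 1000 ImageNet classes
    print(model.config.id2label[logits.argmax(-1).item()])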
| 144 |
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''SpeechT5FeatureExtractor'''
UpperCAmelCase__ = '''SpeechT5Tokenizer'''
def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase__ , UpperCAmelCase__)
def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]:
'''simple docstring'''
A__ = kwargs.pop('''audio''' , UpperCAmelCase__)
A__ = kwargs.pop('''text''' , UpperCAmelCase__)
A__ = kwargs.pop('''text_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
elif text is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if audio_target is not None:
A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_values''']
elif text_target is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]:
'''simple docstring'''
A__ = kwargs.pop('''input_values''' , UpperCAmelCase__)
A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__)
A__ = kwargs.pop('''labels''' , UpperCAmelCase__)
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
if input_values is not None:
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
elif input_ids is not None:
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]):
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = self.feature_extractor.feature_size
A__ = self.feature_extractor.num_mel_bins
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
A__ = feature_size_hack
A__ = targets['''input_values''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
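# A minimal usage sketch for the processor above via the public transformers
# API (the checkpoint name "microsoft/speecht5_tts" is an assumption for
# illustration and requires network access):
if __name__ == "__main__":
    from transformers import SpeechT5Processor
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    # Plain text is routed to the tokenizer; audio targets would instead go
    # through the feature extractor via the `audio_target=` branch of __call__.
    inputs = processor(text="Hello, world!", return_tensors="pt")
    print(inputs["input_ids"].shape)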
| 14 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __magic_name__ ( UpperCAmelCase__):
A: Union[str, Any] = "detr"
A: str = ["past_key_values"]
A: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Dict , lowerCamelCase__ : str=True , lowerCamelCase__ : int=None , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : List[str]=100 , lowerCamelCase__ : Dict=6 , lowerCamelCase__ : Any=2048 , lowerCamelCase__ : str=8 , lowerCamelCase__ : Any=6 , lowerCamelCase__ : Any=2048 , lowerCamelCase__ : Any=8 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : List[str]=0.0 , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[Any]="relu" , lowerCamelCase__ : int=256 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Dict=0.0 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : Optional[int]=1.0 , lowerCamelCase__ : Any=False , lowerCamelCase__ : Optional[int]="sine" , lowerCamelCase__ : Union[str, Any]="resnet50" , lowerCamelCase__ : str=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : Optional[Any]=5 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : Dict=1 , lowerCamelCase__ : Tuple=5 , lowerCamelCase__ : int=2 , lowerCamelCase__ : str=0.1 , **lowerCamelCase__ : str , ) -> List[Any]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCamelCase__ : List[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase__ : Tuple = backbone_config.get('''model_type''' )
UpperCamelCase__ : List[str] = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ : List[str] = config_class.from_dict(UpperCAmelCase__ )
# set timm attributes to None
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : int = None, None, None
UpperCamelCase__ : Optional[int] = use_timm_backbone
UpperCamelCase__ : Optional[Any] = backbone_config
UpperCamelCase__ : Optional[Any] = num_channels
UpperCamelCase__ : Dict = num_queries
UpperCamelCase__ : Tuple = d_model
UpperCamelCase__ : List[str] = encoder_ffn_dim
UpperCamelCase__ : Any = encoder_layers
UpperCamelCase__ : List[Any] = encoder_attention_heads
UpperCamelCase__ : List[str] = decoder_ffn_dim
UpperCamelCase__ : Union[str, Any] = decoder_layers
UpperCamelCase__ : Dict = decoder_attention_heads
UpperCamelCase__ : Optional[int] = dropout
UpperCamelCase__ : Union[str, Any] = attention_dropout
UpperCamelCase__ : int = activation_dropout
UpperCamelCase__ : Any = activation_function
UpperCamelCase__ : List[str] = init_std
UpperCamelCase__ : int = init_xavier_std
UpperCamelCase__ : Optional[int] = encoder_layerdrop
UpperCamelCase__ : str = decoder_layerdrop
UpperCamelCase__ : Dict = encoder_layers
UpperCamelCase__ : Optional[int] = auxiliary_loss
UpperCamelCase__ : Tuple = position_embedding_type
UpperCamelCase__ : Tuple = backbone
UpperCamelCase__ : Union[str, Any] = use_pretrained_backbone
UpperCamelCase__ : int = dilation
# Hungarian matcher
UpperCamelCase__ : Any = class_cost
UpperCamelCase__ : int = bbox_cost
UpperCamelCase__ : int = giou_cost
# Loss coefficients
UpperCamelCase__ : Optional[int] = mask_loss_coefficient
UpperCamelCase__ : Tuple = dice_loss_coefficient
UpperCamelCase__ : List[Any] = bbox_loss_coefficient
UpperCamelCase__ : Optional[int] = giou_loss_coefficient
UpperCamelCase__ : Union[str, Any] = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase__ ( cls : int , lowerCamelCase__ : PretrainedConfig , **lowerCamelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
return cls(backbone_config=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase__ ( self : str ) -> Dict[str, any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCamelCase__ : Tuple = self.backbone_config.to_dict()
UpperCamelCase__ : Optional[int] = self.__class__.model_type
return output
class __magic_name__ ( UpperCAmelCase__):
A: Union[str, Any] = version.parse("1.11")
@property
def UpperCAmelCase__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1E-5
@property
def UpperCAmelCase__ ( self : str ) -> int:
'''simple docstring'''
return 12
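# A minimal construction sketch for this configuration (illustrative values;
# `use_timm_backbone=False` falls back to the transformers ResNet backbone,
# as handled in __init__ above, so the optional timm dependency is not needed):
if __name__ == "__main__":
    from transformers import DetrConfig, DetrModel
    config = DetrConfig(use_timm_backbone=False, num_queries=50)
    model = DetrModel(config)  # randomly initialized, no download needed
    print(config.num_attention_heads, config.hidden_size)  # aliased properties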
| 146 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git_vision_model'''
def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''') == "git":
A__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git'''
def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any:
'''simple docstring'''
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__)
if vision_config is None:
A__ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
A__ = GitVisionConfig(**UpperCAmelCase__)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = tie_word_embeddings
A__ = num_image_with_embedding
A__ = bos_token_id
A__ = eos_token_id
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
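# A small round-trip sketch of the nested config serialization implemented
# above (no checkpoint download needed):
if __name__ == "__main__":
    from transformers import GitConfig
    config = GitConfig()  # vision_config is filled with defaults, as logged above
    as_dict = config.to_dict()
    assert as_dict["model_type"] == "git"
    restored = GitConfig.from_dict(as_dict)
    print(type(restored.vision_config).__name__)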
| 14 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCamelCase = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
_UpperCamelCase = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
_UpperCamelCase = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _A ( UpperCAmelCase__ ):
_SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : List[Any] = RealmTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
__UpperCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , UpperCAmelCase__ ) != tokenize_chinese_chars
):
__UpperCAmelCase : Optional[int] = getattr(UpperCAmelCase__ , normalizer_state.pop("""type""" ) )
__UpperCAmelCase : Tuple = do_lower_case
__UpperCAmelCase : int = strip_accents
__UpperCAmelCase : Optional[Any] = tokenize_chinese_chars
__UpperCAmelCase : Union[str, Any] = normalizer_class(**UpperCAmelCase__ )
__UpperCAmelCase : Optional[int] = do_lower_case
def __A ( self , __UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
__UpperCAmelCase : Tuple = PaddingStrategy.MAX_LENGTH
__UpperCAmelCase : Tuple = text
__UpperCAmelCase : Tuple = kwargs.pop("""text_pair""" , UpperCAmelCase__ )
__UpperCAmelCase : str = kwargs.pop("""return_tensors""" , UpperCAmelCase__ )
__UpperCAmelCase : Any = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(UpperCAmelCase__ ):
if batch_text_pair is not None:
__UpperCAmelCase : Tuple = batch_text_pair[idx]
else:
__UpperCAmelCase : int = None
__UpperCAmelCase : Dict = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
__UpperCAmelCase : Optional[Any] = encoded_candidates.get("""input_ids""" )
__UpperCAmelCase : Tuple = encoded_candidates.get("""attention_mask""" )
__UpperCAmelCase : Optional[Any] = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCAmelCase__ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCAmelCase__ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCAmelCase__ )
__UpperCAmelCase : str = {key: item for key, item in output_data.items() if len(UpperCAmelCase__ ) != 0}
return BatchEncoding(UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
__UpperCAmelCase : str = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
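# A minimal sketch of the candidate-batching __call__ implemented above, via
# the public API (requires network access; the exact keyword arguments are an
# assumption based on the padding logic shown in __call__):
if __name__ == "__main__":
    from transformers import RealmTokenizerFast
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    # One example with two candidate texts; everything is padded to max_length.
    batch = tokenizer.batch_encode_candidates(
        [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
    )
    print(batch["input_ids"].shape)  # (num_examples, num_candidates, max_length)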
| 254 |
import requests
from bs4 import BeautifulSoup
def get_citation( base_url , params ) -> str:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 14 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = ""):
    """simple docstring"""
    url = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
    soup = BeautifulSoup(requests.get(url).text, '''html.parser''' )
    titles = soup.find_all('''td''', attrs='''titleColumn''' )
    ratings = soup.find_all('''td''', class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles, ratings )
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv"):
    """simple docstring"""
    movies = get_imdb_top_250_movies()
    with open(filename, '''w''', newline='''''' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
    write_movies()
| 320 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """simple docstring"""
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('''.''' )[0].split(lora_prefix_text_encoder + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''' )[0].split(lora_prefix_unet + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
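# Example invocation (a sketch: the script filename and every path/model below
# are placeholders, not taken from this file; the flags are the ones defined
# by the argparse block above):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged_pipeline \
#       --alpha 0.75 \
#       --device cpu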
| 14 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
A__ = (720, 1280) # Height, Width
A__ = (0.4, 0.6) # if height or width lower than this scale, drop it.
A__ = 1 / 100
A__ = """"""
A__ = """"""
A__ = """"""
A__ = 250
def _lowerCAmelCase ( ) -> None:
"""simple docstring"""
snake_case__ , snake_case__ : Optional[Any] = get_dataset(lowercase_ , lowercase_ )
for index in range(lowercase_ ):
snake_case__ : Optional[Any] = random.sample(range(len(lowercase_ ) ) , 4 )
snake_case__ , snake_case__ , snake_case__ : List[str] = update_image_and_anno(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , filter_scale=lowercase_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
snake_case__ : Dict = random_chars(32 )
snake_case__ : Dict = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
snake_case__ : Optional[Any] = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cv2.imwrite(f"""{file_root}.jpg""" , lowercase_ , [cv2.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
snake_case__ : Optional[Any] = []
for anno in new_annos:
snake_case__ : Optional[int] = anno[3] - anno[1]
snake_case__ : Dict = anno[4] - anno[2]
snake_case__ : str = anno[1] + width / 2
snake_case__ : Optional[int] = anno[2] + height / 2
snake_case__ : Optional[int] = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(lowercase_ )
with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> tuple[list, list]:
"""simple docstring"""
snake_case__ : int = []
snake_case__ : Optional[Any] = []
for label_file in glob.glob(os.path.join(lowercase_ , '''*.txt''' ) ):
snake_case__ : Union[str, Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(lowercase_ ) as in_file:
snake_case__ : List[str] = in_file.readlines()
snake_case__ : int = os.path.join(lowercase_ , f"""{label_name}.jpg""" )
snake_case__ : int = []
for obj_list in obj_lists:
snake_case__ : int = obj_list.rstrip('''\n''' ).split(''' ''' )
snake_case__ : Tuple = float(obj[1] ) - float(obj[3] ) / 2
snake_case__ : Optional[Any] = float(obj[2] ) - float(obj[4] ) / 2
snake_case__ : Tuple = float(obj[1] ) + float(obj[3] ) / 2
snake_case__ : int = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowercase_ )
labels.append(lowercase_ )
return img_paths, labels
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
    snake_case__ : Union[str, Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
snake_case__ : Any = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case__ : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case__ : Optional[int] = int(scale_x * output_size[1] )
snake_case__ : List[str] = int(scale_y * output_size[0] )
snake_case__ : str = []
snake_case__ : Optional[int] = []
for i, index in enumerate(lowercase_ ):
snake_case__ : str = all_img_list[index]
path_list.append(lowercase_ )
snake_case__ : int = all_annos[index]
        snake_case__ : Optional[Any] = cv2.imread(lowercase_ )
if i == 0: # top-left
            snake_case__ : Tuple = cv2.resize(lowercase_ , (divid_point_x, divid_point_y) )
snake_case__ : Optional[Any] = img
for bbox in img_annos:
snake_case__ : int = bbox[1] * scale_x
snake_case__ : str = bbox[2] * scale_y
snake_case__ : Optional[int] = bbox[3] * scale_x
snake_case__ : Tuple = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
            snake_case__ : Tuple = cv2.resize(lowercase_ , (output_size[1] - divid_point_x, divid_point_y) )
snake_case__ : Tuple = img
for bbox in img_annos:
snake_case__ : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
snake_case__ : Dict = bbox[2] * scale_y
snake_case__ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
snake_case__ : List[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
            snake_case__ : int = cv2.resize(lowercase_ , (divid_point_x, output_size[0] - divid_point_y) )
snake_case__ : Union[str, Any] = img
for bbox in img_annos:
snake_case__ : Optional[int] = bbox[1] * scale_x
snake_case__ : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
snake_case__ : List[str] = bbox[3] * scale_x
snake_case__ : Dict = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
            snake_case__ : List[str] = cv2.resize(
                lowercase_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
snake_case__ : List[str] = img
for bbox in img_annos:
snake_case__ : int = scale_x + bbox[1] * (1 - scale_x)
snake_case__ : Any = scale_y + bbox[2] * (1 - scale_y)
snake_case__ : Optional[int] = scale_x + bbox[3] * (1 - scale_x)
snake_case__ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
snake_case__ : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _lowerCAmelCase ( __lowerCAmelCase ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
snake_case__ : Union[str, Any] = ascii_lowercase + digits
return "".join(random.choice(lowercase_ ) for _ in range(lowercase_ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
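# For reference, the annotation arithmetic in the driver function above
# converts corner-format boxes back to normalized YOLO format. A standalone
# sketch (the helper name is illustrative, not defined elsewhere in this file):
def corners_to_yolo(anno: list) -> str:
    """[class, xmin, ymin, xmax, ymax] (normalized) -> 'class xc yc w h'."""
    width = anno[3] - anno[1]
    height = anno[4] - anno[2]
    x_center = anno[1] + width / 2
    y_center = anno[2] + height / 2
    return f"{anno[0]} {x_center} {y_center} {width} {height}"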
| 230 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path , case ) -> None:
    """simple docstring"""
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
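# A minimal direct-usage sketch of the helper under test (the temporary path
# is created on the fly; nothing here is taken from the parametrized cases):
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = os.path.join(tmp_dir, "example.py")
        with open(path, "w") as f:
            f.write(TOP_LEVEL_IMPORT)
        print(get_imports(path))  # -> ['os']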
| 14 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
A : Tuple = 2_5_6_0_4_7
A : int = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( UpperCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[str] =NllbTokenizer
__UpperCAmelCase : Optional[int] =NllbTokenizerFast
__UpperCAmelCase : Union[str, Any] =True
__UpperCAmelCase : List[Any] =True
__UpperCAmelCase : Tuple ={}
def snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = NllbTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
__lowerCAmelCase = NllbTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
__lowerCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def snake_case ( self ):
__lowerCAmelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(UpperCAmelCase__ )
__lowerCAmelCase = tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__lowerCAmelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(UpperCAmelCase__ )
__lowerCAmelCase = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=True
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
__lowerCAmelCase = tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(UpperCAmelCase__ )
__lowerCAmelCase = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=False
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
__lowerCAmelCase = tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(UpperCAmelCase__ )
__lowerCAmelCase = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
@require_torch
def snake_case ( self ):
if not self.test_seqaseq:
return
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
__lowerCAmelCase = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for"
" Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
__lowerCAmelCase = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
__lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCAmelCase__ , tgt_texts=UpperCAmelCase__ , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
UpperCAmelCase__ , tgt_texts=UpperCAmelCase__ , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCAmelCase__ , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , UpperCAmelCase__ )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def snake_case ( self ):
pass
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase = [AddedToken("<special>" , lstrip=UpperCAmelCase__ )]
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ )
__lowerCAmelCase = tokenizer_r.encode("Hey this is a <special> token" )
__lowerCAmelCase = tokenizer_r.encode("<special>" , add_special_tokens=UpperCAmelCase__ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(
UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ )
__lowerCAmelCase = tokenizer_p.encode("Hey this is a <special> token" )
__lowerCAmelCase = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] ="""facebook/nllb-200-distilled-600M"""
__UpperCAmelCase : int =[
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
__UpperCAmelCase : Optional[Any] =[
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
__UpperCAmelCase : List[Any] =[
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def snake_case ( cls ):
__lowerCAmelCase = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
__lowerCAmelCase = 1
return cls
def snake_case ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_60_57 )
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
def snake_case ( self ):
self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
__lowerCAmelCase = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
__lowerCAmelCase = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
__lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , UpperCAmelCase__ )
__lowerCAmelCase = 10
__lowerCAmelCase = self.tokenizer(UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , UpperCAmelCase__ )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
def snake_case ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_62_03, 3] )
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase__ )
__lowerCAmelCase = NllbTokenizer.from_pretrained(UpperCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase__ )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
__lowerCAmelCase = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer(self.src_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=3 , return_tensors="pt" )
__lowerCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=10 , return_tensors="pt" )
__lowerCAmelCase = targets["input_ids"]
__lowerCAmelCase = shift_tokens_right(
UpperCAmelCase__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , {
# A, test, EOS, en_XX
"input_ids": [[25_60_47, 70, 73_56, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_60_57,
} , )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = True
__lowerCAmelCase = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
__lowerCAmelCase = False
__lowerCAmelCase = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
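# A minimal translation-tokenization sketch against the same checkpoint these
# tests target (requires network access; the sentence is illustrative):
if __name__ == "__main__":
    from transformers import NllbTokenizer
    tok = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
    )
    enc = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
    print(enc["input_ids"].shape)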
| 57 |
def nor_gate(input_1, input_2) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0 )
def main() -> None:
"""simple docstring"""
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
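# NOR is functionally complete: NOT is a NOR with both inputs tied together,
# and OR is a negated NOR. A small sketch built on nor_gate above:
def not_gate(input_1: int) -> int:
    return nor_gate(input_1, input_1)
def or_gate(input_1: int, input_2: int) -> int:
    return not_gate(nor_gate(input_1, input_2))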
| 14 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase (UpperCAmelCase__ ):
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
SCREAMING_SNAKE_CASE__ = bertabert.config.encoder.vocab_size
SCREAMING_SNAKE_CASE__ = tokenizer.sep_token_id
SCREAMING_SNAKE_CASE__ = tokenizer.cls_token_id
SCREAMING_SNAKE_CASE__ = 1_2_8
SCREAMING_SNAKE_CASE__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
SCREAMING_SNAKE_CASE__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
SCREAMING_SNAKE_CASE__ = train_dataset.select(range(3_2 ) )
SCREAMING_SNAKE_CASE__ = val_dataset.select(range(1_6 ) )
SCREAMING_SNAKE_CASE__ = 4
def _map_to_encoder_decoder_inputs(__UpperCAmelCase : List[str] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
SCREAMING_SNAKE_CASE__ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase__ , max_length=5_1_2 )
SCREAMING_SNAKE_CASE__ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase__ , max_length=1_2_8 )
SCREAMING_SNAKE_CASE__ = inputs.input_ids
SCREAMING_SNAKE_CASE__ = inputs.attention_mask
SCREAMING_SNAKE_CASE__ = outputs.input_ids
SCREAMING_SNAKE_CASE__ = outputs.input_ids.copy()
SCREAMING_SNAKE_CASE__ = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
SCREAMING_SNAKE_CASE__ = outputs.attention_mask
assert all(len(UpperCAmelCase__ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(UpperCAmelCase__ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(__UpperCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE__ = pred.label_ids
SCREAMING_SNAKE_CASE__ = pred.predictions
# all unnecessary tokens are removed
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase__ ) )] ) / len(UpperCAmelCase__ )
return {"accuracy": accuracy}
# map train dataset
SCREAMING_SNAKE_CASE__ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
SCREAMING_SNAKE_CASE__ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
SCREAMING_SNAKE_CASE__ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ = SeqaSeqTrainingArguments(
output_dir=UpperCAmelCase__ , per_device_train_batch_size=UpperCAmelCase__ , per_device_eval_batch_size=UpperCAmelCase__ , predict_with_generate=UpperCAmelCase__ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase__ , do_eval=UpperCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
SCREAMING_SNAKE_CASE__ = SeqaSeqTrainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , )
# start training
trainer.train()
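# After fine-tuning, generation with the warm-started encoder-decoder might
# look like the sketch below (illustrative only, not executed by the test):
#
#     article = "Some news article text..."
#     input_ids = tokenizer(article, return_tensors="pt").input_ids
#     summary_ids = bertabert.generate(
#         input_ids, decoder_start_token_id=tokenizer.cls_token_id, max_length=32
#     )
#     print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))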
| 165 |
import os
import sys
import unittest
_lowerCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCamelCase : Any = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
_lowerCamelCase : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
'''simple docstring'''
A__ = get_test_to_tester_mapping(UpperCAmelCase__)
A__ = get_test_to_tester_mapping(UpperCAmelCase__)
A__ = {'''BertModelTest''': '''BertModelTester'''}
A__ = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        '''simple docstring'''
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        '''simple docstring'''
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 14 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """simple docstring"""
    locka = FileLock(str(tmpdir / "foo.lock"))
    lockb = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    """simple docstring"""
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(".lock")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
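

def _filelock_usage_sketch(tmp_path):
    # Illustrative usage sketch (not part of the original test suite; the file
    # name and the 0.01s timeout are my own assumptions): a second FileLock on
    # the same path blocks until the holder releases, or raises Timeout when
    # asked to wait only briefly.
    lock = FileLock(str(tmp_path / "demo.lock"))
    other = FileLock(str(tmp_path / "demo.lock"))
    with lock.acquire():
        try:
            other.acquire(timeout=0.01)
        except Timeout:
            pass  # expected: the first lock is still held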
| 169 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = encoder_stride
A__ = num_attention_outputs
A__ = embed_dim
A__ = embed_dim + 1
A__ = resolution
A__ = depths
A__ = hidden_sizes
A__ = dim
A__ = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict:
'''simple docstring'''
A__ = TFEfficientFormerModel(config=UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
A__ = 1
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
'''simple docstring'''
A__ = TFEfficientFormerModelTester(self)
A__ = ConfigTester(
self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict):
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
if hasattr(self.model_tester , '''encoder_seq_length'''):
A__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
A__ = seq_length * self.model_tester.chunk_length
else:
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
A__ = outputs.decoder_hidden_states
                self.assertIsInstance(UpperCAmelCase__ , (list, tuple))
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int:
'''simple docstring'''
A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def SCREAMING_SNAKE_CASE ( self : str) ->str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
A__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
A__ = model_class(UpperCAmelCase__)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
A__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
A__ = model(UpperCAmelCase__)
self.assertTrue(outputs_dict is not None)
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.0555, 0.4825, -0.0852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.1312, 0.4353, -1.0499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
| 14 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('''CoinsDistribResult''', '''moves excess''')


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("""The nodes number should be same as the number of coins""")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
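    # Illustrative extra check (my example, not from the original module): a
    # root holding 0 coins whose children hold 3 and 0 coins needs 3 moves --
    # the left child sends its 2 surplus coins up through the root.
    example_tree = TreeNode(0, TreeNode(3), TreeNode(0))
    assert distribute_coins(example_tree) == 3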
| 301 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
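    # Illustrative run (item values/weights are my own, not from this module):
    # ratios are 6, 5, 4, so the greedy picks items 0 and 1 whole plus 2/3 of
    # item 2, for the classic optimum of 240.0.
    total, fractions_taken = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(f"max value: {total}, fractions: {fractions_taken}")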
| 14 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, """vision""")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
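
# Minimal usage sketch (the model id and image path below are illustrative
# assumptions, not taken from this module):
#
#     from transformers import pipeline
#     classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#     classifier("path/or/url/to/image.png", top_k=3)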
| 122 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """simple docstring"""
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''') + [args.config, args.split])

    # load metric
    wer = load_metric('''wer''')
    cer = load_metric('''cer''')

    # compute metrics
    wer_result = wer.compute(references=result['''target'''], predictions=result['''prediction'''])
    cer_result = cer.compute(references=result['''target'''], predictions=result['''prediction'''])

    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str)

    with open(f"""{dataset_id}_eval_results.txt""", '''w''') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""

        with open(pred_file, '''w''') as p, open(target_file, '''w''') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"""{i}""" + '''\n''')
                p.write(batch['''prediction'''] + '''\n''')
                t.write(f"""{i}""" + '''\n''')
                t.write(batch['''target'''] + '''\n''')

            result.map(write_to_file, with_indices=True)


def normalize_text(text):
    """simple docstring"""
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '''''', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t))

    return text


def main(args):
    """simple docstring"""
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('''audio''', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['''audio''']['''array'''], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['''prediction'''] = prediction['''text''']
        batch['''target'''] = normalize_text(batch['''sentence'''])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
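# Example invocation (model/dataset ids below are illustrative, not from this script):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs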
| 14 | 0 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def UpperCamelCase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = XGLMTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ):
lowercase = '<pad>'
lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def UpperCamelCase_ ( self ):
lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(UpperCAmelCase__ ) , 1_0_0_8 )
def UpperCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
def UpperCamelCase_ ( self ):
lowercase = XGLMTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowercase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCamelCase_ ( self ):
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def UpperCamelCase_ ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCAmelCase__ , f.name )
lowercase = XGLMTokenizer(f.name , keep_accents=UpperCAmelCase__ )
lowercase = pickle.dumps(UpperCAmelCase__ )
pickle.loads(UpperCAmelCase__ )
def UpperCamelCase_ ( self ):
if not self.test_rust_tokenizer:
return
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = 'I was born in 92000, and this is falsé.'
lowercase = tokenizer.tokenize(UpperCAmelCase__ )
lowercase = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase = self.get_rust_tokenizer()
lowercase = tokenizer.encode(UpperCAmelCase__ )
lowercase = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def UpperCamelCase_ ( self ):
lowercase = 'Hello World!'
lowercase = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def UpperCamelCase_ ( self ):
lowercase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def UpperCamelCase_ ( self ):
        # fmt: off
        lowercase = {
'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='facebook/xglm-564M' , padding=UpperCAmelCase__ , )
| 220 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
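
# Note: replacing sys.modules[__name__] with a _LazyModule means the heavy
# torch/TF submodules listed above are only imported on first attribute access.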
| 14 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
def __init__( self : Tuple , snake_case__ : str , snake_case__ : Optional[int]=13 , snake_case__ : str=7 , snake_case__ : List[str]=True , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=99 , snake_case__ : Tuple=16 , snake_case__ : Optional[Any]=2 , snake_case__ : Tuple=4 , snake_case__ : Union[str, Any]=4 , snake_case__ : Optional[int]="gelu" , snake_case__ : int=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Any=32 , snake_case__ : Any=2 , snake_case__ : Any=1 , snake_case__ : Tuple=0 , snake_case__ : Dict=0.02 , ):
lowerCamelCase_ : int =parent
lowerCamelCase_ : Any =batch_size
lowerCamelCase_ : str =seq_length
lowerCamelCase_ : List[str] =is_training
lowerCamelCase_ : List[Any] =use_labels
lowerCamelCase_ : str =vocab_size
lowerCamelCase_ : Optional[int] =hidden_size
lowerCamelCase_ : int =num_hidden_layers
lowerCamelCase_ : int =num_attention_heads
lowerCamelCase_ : Union[str, Any] =intermediate_size
lowerCamelCase_ : Any =hidden_act
lowerCamelCase_ : Dict =hidden_dropout_prob
lowerCamelCase_ : int =attention_probs_dropout_prob
lowerCamelCase_ : Tuple =max_position_embeddings
lowerCamelCase_ : Optional[Any] =eos_token_id
lowerCamelCase_ : Any =pad_token_id
lowerCamelCase_ : str =bos_token_id
lowerCamelCase_ : Union[str, Any] =initializer_range
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Tuple =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCamelCase_ : List[str] =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCamelCase_ : Union[str, Any] =shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
lowerCamelCase_ : Optional[int] =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , )
lowerCamelCase_ : Optional[int] =prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ , lowerCamelCase_ : Dict =self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase__ ( self : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : Dict =20
lowerCamelCase_ : Tuple =model_class_name(UpperCAmelCase__ )
lowerCamelCase_ : int =model.encode(inputs_dict["input_ids"] )
lowerCamelCase_ , lowerCamelCase_ : Dict =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase_ : Optional[Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase_ : Tuple =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowerCamelCase_ : List[str] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase_ : List[Any] =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
lowerCamelCase_ : Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCamelCase_ : List[str] =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , )
lowerCamelCase_ : List[str] =model.decode(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase_ : Union[str, Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : Any =20
lowerCamelCase_ : Dict =model_class_name(UpperCAmelCase__ )
lowerCamelCase_ : Optional[int] =model.encode(inputs_dict["input_ids"] )
lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase_ : Optional[int] =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCamelCase_ : Optional[int] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase_ : Optional[Any] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase_ : Dict =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
lowerCamelCase_ : Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCamelCase_ : Tuple =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
lowerCamelCase_ : int =model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ )
lowerCamelCase_ : List[str] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowercase__ ( unittest.TestCase ):
    vocab_size = 99
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : str =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCamelCase_ : List[str] =input_ids.shape[0]
lowerCamelCase_ : Optional[int] =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any =self._get_config_and_data()
lowerCamelCase_ : str =FlaxBlenderbotSmallForConditionalGeneration(UpperCAmelCase__ )
lowerCamelCase_ : Optional[Any] =lm_model(input_ids=UpperCAmelCase__ )
lowerCamelCase_ : List[Any] =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : Optional[int] =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCamelCase_ : int =FlaxBlenderbotSmallForConditionalGeneration(UpperCAmelCase__ )
lowerCamelCase_ : List[Any] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
lowerCamelCase_ : Tuple =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
lowerCamelCase_ : Union[str, Any] =lm_model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
lowerCamelCase_ : Union[str, Any] =(*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase__ )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : List[str] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
lowerCamelCase_ : Optional[Any] =shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
lowerCamelCase_ : List[str] =np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
lowerCamelCase_ : Any =np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCAmelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Tuple =FlaxBlenderbotSmallModelTester(self )
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ , lowerCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ , lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ : Union[str, Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase_ : Tuple =model_class(UpperCAmelCase__ )
@jax.jit
def encode_jitted(snake_case__ : Tuple , snake_case__ : List[Any]=None , **snake_case__ : str ):
return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
lowerCamelCase_ : Optional[int] =encode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCamelCase_ : str =encode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ , lowerCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ : Optional[int] =model_class(UpperCAmelCase__ )
lowerCamelCase_ : Union[str, Any] =model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
lowerCamelCase_ : Optional[Any] ={
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(snake_case__ : int , snake_case__ : Any , snake_case__ : Any ):
return model.decode(
decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , )
with self.subTest("JIT Enabled" ):
lowerCamelCase_ : int =decode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCamelCase_ : Union[str, Any] =decode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase__ ( self : Any ):
for model_class_name in self.all_model_classes:
lowerCamelCase_ : Dict =model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase_ : List[Any] =np.ones((1, 1) ) * model.config.eos_token_id
lowerCamelCase_ : Optional[int] =model(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
| 144 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
"""simple docstring"""
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
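
# Note (added for clarity): each edge above is encoded as [node_u, node_v, weight];
# both lists are sorted before comparison because the edge order of a minimum
# spanning tree returned by kruskal() is not guaranteed.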
| 146 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''')

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
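# Spot check (stated for reference; these are the well-known Catalan numbers):
# catalan_numbers(5) == [1, 1, 2, 5, 14, 42]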
| 14 | 0 |
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """simple docstring"""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
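    # Illustrative run (input values are my own): least-significant-digit radix
    # sort of non-negative integers, one digit per pass.
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]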
| 254 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('''/''')
    target_model_path = args.target_model_path

    print(f"""Load fine-pruned model from {model_name_or_path}""")
    model = torch.load(os.path.join(model_name_or_path, '''pytorch_model.bin'''))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            else:
                raise ValueError('''Unknown pruning method''')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"""bertarized_{os.path.basename(model_name_or_path)}""")

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"""\nCreated folder {target_model_path}""")

    torch.save(pruned_model, os.path.join(target_model_path, '''pytorch_model.bin'''))
    print('''\nPruned model saved! See you later!''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
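
# Hedged sketch (not part of the original script): the essence of the
# "magnitude" branch above, as a standalone helper. `keep_fraction` mirrors
# what --threshold means for magnitude/topK pruning; this is an illustration,
# not the emmental MagnitudeBinarizer.
def magnitude_mask(tensor, keep_fraction):
    # keep the `keep_fraction` share of weights with the largest magnitude
    k = max(1, int(keep_fraction * tensor.numel()))
    cutoff = tensor.abs().flatten().topk(k).values.min()
    return (tensor.abs() >= cutoff).to(tensor.dtype)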
| 14 | 0 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Flattens a list of sentences into a list of characters."""

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 320 |
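
# Hedged sanity check (not part of the CER metric above): character error rate
# for a single pair, using the same jiwer call and character-level transform
# as _compute.
def _cer_single(reference, prediction):
    measures = jiwer.compute_measures(
        reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform
    )
    incorrect = measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total = measures["substitutions"] + measures["deletions"] + measures["hits"]
    return incorrect / total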
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string, as used by zlib."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
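
# Hedged cross-check: for ASCII input the result should agree with zlib's
# C implementation (zlib.adler32 takes bytes).
if __name__ == "__main__":
    import zlib

    assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia")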
| 14 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    """Build a MobileViTConfig matching the named architecture variant."""
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    """Map an original MobileViT parameter name onto the HF naming scheme."""
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    """Rename keys and split fused qkv projections into query/key/value."""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    """Load a test image of two cats from the COCO val set."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original model's weights into the HF MobileViT structure."""
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
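
# Hedged illustration (not part of the conversion script): convert_state_dict
# splits a fused (3 * dim, hidden) qkv projection into query/key/value rows.
# A toy self-check with made-up sizes:
def _demo_qkv_split(dim=4, hidden=8):
    qkv = torch.randn(3 * dim, hidden)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)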
| 230 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the best answer span predictions for the extractive reader."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Find the best non-overlapping candidate answer spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
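
# Hedged usage sketch (the checkpoint name comes from the URL maps above; the
# question/title/text values are made up):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encodings = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by the artist Haddaway",
#       return_tensors="pt",
#   )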
| 14 | 0 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
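
# Hedged hand-check of the formula quoted in _DESCRIPTION, for Example 1 above:
# preds=[0, 0, 1, 1, 0] vs refs=[0, 1, 0, 1, 0] gives tp=1, fp=1, fn=1, so
# precision = recall = 0.5 and F1 = 2 * (0.5 * 0.5) / (0.5 + 0.5) = 0.5.
def _f1_by_hand(tp=1, fp=1, fn=1):
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return 2 * (precision * recall) / (precision + recall)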
| 57 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration class for a generic composite encoder-decoder model."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
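
# Hedged usage sketch (BertConfig is just one possible sub-config; any
# registered model type works the same way):
#
#   from transformers import BertConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention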
| 14 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length"
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
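
# Hedged usage sketch: PipelineTool instances are callable, so classification
# looks roughly like this (the model is downloaded on first use; the exact
# call convention is an assumption based on `inputs` above):
#
#   classifier = TextClassificationTool()
#   label = classifier("This is a super nice API!", ["positive", "negative"])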
| 165 |
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG (Kahn's topological order)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
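
# Hand check: the longest chain in the DAG above is 0 -> 2 -> 5 -> 6 -> 7,
# which touches five vertices (long_dist counts vertices, not edges), so the
# call prints 5.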
| 14 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every 3-permutation until one sums to target."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer scan over the sorted array, O(n^2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
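
# Quick hand check of the two-pointer variant on a fixed input:
# triplet_sum2([1, 2, 3, 4, 5], 9) returns (1, 3, 5) -- after one left-pointer
# advance (1 + 2 + 5 = 8 < 9, then 1 + 3 + 5 = 9).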
| 169 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
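
# Hedged usage sketch: this builder is what backs load_dataset("json", ...),
# so from user code the module above is exercised roughly like this:
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl", split="train")
#   # or, for one big JSON object: load_dataset("json", data_files="data.json", field="data")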
| 14 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
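
# Hedged usage sketch (the checkpoint name is an assumption, not taken from
# this file):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#   audio = output.audios[0]  # numpy array, shape (channels, samples)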
| 301 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    # Test method names below are reconstructed descriptively; any `test_*`
    # name works with unittest.
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 14 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
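
# Hedged usage sketch: instantiating the config above with non-default latent
# sizes (values here are arbitrary examples):
#
#   config = PerceiverConfig(num_latents=128, d_latents=512)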
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ) ->None:
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ) ->dict:
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values( self , image_inputs , batched=False ) ->tuple:
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w)
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h)
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values , key=lambda item: item[0])[0]
            expected_width = max(expected_values , key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ) ->None:
        '''simple docstring'''
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) ->None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , '''image_mean'''))
        self.assertTrue(hasattr(image_processing , '''image_std'''))
        self.assertTrue(hasattr(image_processing , '''do_normalize'''))
        self.assertTrue(hasattr(image_processing , '''do_resize'''))
        self.assertTrue(hasattr(image_processing , '''do_rescale'''))
        self.assertTrue(hasattr(image_processing , '''do_pad'''))
        self.assertTrue(hasattr(image_processing , '''size'''))
    def test_image_processor_from_dict_with_kwargs( self ) ->None:
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333})
        self.assertEqual(image_processor.do_pad , True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
        self.assertEqual(image_processor.do_pad , False)
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''image_id''': 39_769, '''annotations''': target}
# encode them
A__ = DeformableDetrImageProcessor()
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
A__ = DeformableDetrImageProcessor(format='''coco_panoptic''')
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify masks
A__ = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__)
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
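# Standalone sketch of the shortest-edge resize rule that get_expected_values
# mirrors above: scale so the short side hits `shortest_edge`, preserving the
# aspect ratio (the `longest_edge` cap is left out for brevity).
def shortest_edge_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(400, 200) == (36, 18)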
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : List[Any] = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class a ( PretrainedConfig ):
    model_type = "luke"
def __init__( self , _lowerCamelCase=5_0_2_6_7 , _lowerCamelCase=5_0_0_0_0_0 , _lowerCamelCase=7_6_8 , _lowerCamelCase=2_5_6 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , **_lowerCamelCase , ):
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase = vocab_size
lowercase = entity_vocab_size
lowercase = hidden_size
lowercase = entity_emb_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = use_entity_aware_attention
lowercase = classifier_dropout
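# Hedged usage sketch: the class above mirrors transformers.LukeConfig, which
# extends the BERT-style fields with an entity vocabulary and embedding size.
from transformers import LukeConfig
luke_config = LukeConfig(entity_vocab_size=10_000, entity_emb_size=128)
assert luke_config.entity_emb_size == 128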
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance in pure Python."""
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
        """Benchmark both implementations with timeit."""
        from timeit import timeit
        print('''Without Numpy''')
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
        print('''With Numpy''')
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
    benchmark()
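    # Quick agreement check for the two implementations above; both equal
    # sqrt(27) ≈ 5.196152 on this input.
    assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - euclidean_distance_no_np([1, 2, 3], [4, 5, 6])) < 1e-12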
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Any = logging.get_logger(__name__)
class lowercase__ ( PretrainedConfig ):
    model_type = "encoder-decoder"
    is_composition = True
def __init__( self : List[str] , **snake_case__ : Union[str, Any] ):
super().__init__(**UpperCAmelCase__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowerCamelCase_ : Union[str, Any] =kwargs.pop("encoder" )
lowerCamelCase_ : str =encoder_config.pop("model_type" )
lowerCamelCase_ : Tuple =kwargs.pop("decoder" )
lowerCamelCase_ : Optional[int] =decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
lowerCamelCase_ : int =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCamelCase_ : Optional[Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCamelCase_ : Optional[Any] =True
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , snake_case__ : PretrainedConfig , snake_case__ : PretrainedConfig , **snake_case__ : Union[str, Any] ):
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
lowerCamelCase_ : int =True
lowerCamelCase_ : Optional[int] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : List[Any] =copy.deepcopy(self.__dict__ )
lowerCamelCase_ : List[Any] =self.encoder.to_dict()
lowerCamelCase_ : Optional[Any] =self.decoder.to_dict()
lowerCamelCase_ : Union[str, Any] =self.__class__.model_type
return output
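# Hedged usage sketch for the classmethod above, using the real library classes
# it mirrors (EncoderDecoderConfig.from_encoder_decoder_configs):
from transformers import BertConfig, EncoderDecoderConfig
enc_dec_config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
assert enc_dec_config.decoder.is_decoder and enc_dec_config.decoder.add_cross_attention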
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ ( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = '''SpeechT5FeatureExtractor'''
    tokenizer_class = '''SpeechT5Tokenizer'''
def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase__ , UpperCAmelCase__)
def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]:
'''simple docstring'''
A__ = kwargs.pop('''audio''' , UpperCAmelCase__)
A__ = kwargs.pop('''text''' , UpperCAmelCase__)
A__ = kwargs.pop('''text_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
elif text is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if audio_target is not None:
A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_values''']
elif text_target is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]:
'''simple docstring'''
A__ = kwargs.pop('''input_values''' , UpperCAmelCase__)
A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__)
A__ = kwargs.pop('''labels''' , UpperCAmelCase__)
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
if input_values is not None:
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
elif input_ids is not None:
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]):
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = self.feature_extractor.feature_size
A__ = self.feature_extractor.num_mel_bins
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
A__ = feature_size_hack
A__ = targets['''input_values''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
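# Hedged usage sketch for the processor above (it mirrors transformers.SpeechT5Processor):
# text is routed to the tokenizer, audio to the feature extractor, and *_target
# inputs end up under `labels`. The checkpoint name below is only an example.
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# batch = processor(text="hello world", return_tensors="pt")                           # -> input_ids
# batch = processor(audio_target=waveform, sampling_rate=16_000, return_tensors="pt")  # -> labels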
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0), ..., C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''')
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")
    import doctest
    doctest.testmod()
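# Cross-check the dynamic-programming result against the closed form
# C(n) = C(2n, n) / (n + 1).
from math import comb
assert catalan_numbers(10) == [comb(2 * n, n) // (n + 1) for n in range(11)]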
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''git_vision_model'''
def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''') == "git":
A__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class GitConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''git'''
def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any:
'''simple docstring'''
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__)
if vision_config is None:
A__ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
A__ = GitVisionConfig(**UpperCAmelCase__)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = tie_word_embeddings
A__ = num_image_with_embedding
A__ = bos_token_id
A__ = eos_token_id
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
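# Hedged usage sketch using the real library counterpart of the classes above
# (transformers.GitConfig): a nested vision config may be passed as a plain dict
# and is converted to a GitVisionConfig. Values here are illustrative.
# from transformers import GitConfig as HFGitConfig
# cfg = HFGitConfig(vision_config={"hidden_size": 512, "num_hidden_layers": 6})
# assert cfg.vision_config.hidden_size == 512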
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester ( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 255 , __UpperCAmelCase=True , ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333}
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Dict = min_resolution
__UpperCAmelCase : List[str] = max_resolution
__UpperCAmelCase : str = do_resize
__UpperCAmelCase : int = size
__UpperCAmelCase : Optional[Any] = do_normalize
__UpperCAmelCase : List[Any] = image_mean
__UpperCAmelCase : Optional[int] = image_std
__UpperCAmelCase : Any = do_rescale
__UpperCAmelCase : Any = rescale_factor
__UpperCAmelCase : List[Any] = do_pad
def __A ( self ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __A ( self , __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
if not batched:
__UpperCAmelCase : Union[str, Any] = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = image.size
else:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = image.shape[1], image.shape[2]
if w < h:
__UpperCAmelCase : int = int(self.size["""shortest_edge"""] * h / w )
__UpperCAmelCase : Tuple = self.size["""shortest_edge"""]
elif w > h:
__UpperCAmelCase : Union[str, Any] = self.size["""shortest_edge"""]
__UpperCAmelCase : Optional[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__UpperCAmelCase : Optional[int] = self.size["""shortest_edge"""]
__UpperCAmelCase : int = self.size["""shortest_edge"""]
else:
__UpperCAmelCase : Union[str, Any] = []
for image in image_inputs:
__UpperCAmelCase , __UpperCAmelCase : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__UpperCAmelCase : int = max(UpperCAmelCase__ , key=lambda __UpperCAmelCase : item[0] )[0]
__UpperCAmelCase : List[str] = max(UpperCAmelCase__ , key=lambda __UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _A ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
def setUp( self ) -> None:
'''simple docstring'''
self.image_processor_tester = DeformableDetrImageProcessingTester(self )
@property
def image_processor_dict( self ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """do_rescale""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """do_pad""" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , """size""" ) )
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1_333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
__UpperCAmelCase : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
__UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase , __UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
__UpperCAmelCase : Optional[int] = image_processing(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
__UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase : str = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase : int = image_processing(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
__UpperCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase : int = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase : List[Any] = image_processing(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__UpperCAmelCase : Any = json.loads(f.read() )
__UpperCAmelCase : str = {"""image_id""": 39_769, """annotations""": target}
# encode them
__UpperCAmelCase : List[str] = DeformableDetrImageProcessor()
__UpperCAmelCase : List[Any] = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors="""pt""" )
# verify pixel values
__UpperCAmelCase : Any = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase__ )
__UpperCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
__UpperCAmelCase : str = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase__ ) )
# verify boxes
__UpperCAmelCase : str = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase__ )
__UpperCAmelCase : Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
__UpperCAmelCase : Optional[Any] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase__ ) )
# verify is_crowd
__UpperCAmelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase__ ) )
# verify class_labels
__UpperCAmelCase : Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase__ ) )
# verify orig_size
__UpperCAmelCase : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase__ ) )
# verify size
__UpperCAmelCase : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase__ ) )
@slow
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__UpperCAmelCase : List[str] = json.loads(f.read() )
__UpperCAmelCase : str = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target}
__UpperCAmelCase : Optional[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__UpperCAmelCase : Dict = DeformableDetrImageProcessor(format="""coco_panoptic""" )
__UpperCAmelCase : List[str] = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors="""pt""" )
# verify pixel values
__UpperCAmelCase : Any = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase__ )
__UpperCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
__UpperCAmelCase : Optional[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase__ ) )
# verify boxes
__UpperCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase__ )
__UpperCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
__UpperCAmelCase : List[Any] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase__ ) )
# verify is_crowd
__UpperCAmelCase : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase__ ) )
# verify class_labels
__UpperCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase__ ) )
# verify masks
__UpperCAmelCase : List[Any] = 822_873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCAmelCase__ )
# verify orig_size
__UpperCAmelCase : List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase__ ) )
# verify size
__UpperCAmelCase : Optional[int] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase__ ) )
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown by Google Scholar for the given lookup."""
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process using shortest remaining time first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 9_99_99_99_99
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 9_99_99_99_99
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}')
    print('Average turn around time =', total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print('Enter how many process you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            'Process',
            'BurstTime',
            'ArrivalTime',
            'WaitingTime',
            'TurnAroundTime',
        ],
    )
    # Printing the dataFrame
    pd.set_option('display.max_rows', fcfs.shape[0] + 1)
    print(fcfs)
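# Worked example (non-interactive): arrival times [0, 1, 2], burst times
# [4, 2, 1]. SRTF preempts the long first job, giving waiting times [3, 0, 1]
# and turnaround times [7, 2, 2]:
# >>> calculate_waitingtime([0, 1, 2], [4, 2, 1], 3)
# [3, 0, 1]
# >>> calculate_turnaroundtime([4, 2, 1], 3, [3, 0, 1])
# [7, 2, 2]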
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha) -> StableDiffusionPipeline:
    """Merge LoRA weights from a .safetensors checkpoint into a diffusers pipeline."""
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('''.''' )[0].split(lora_prefix_text_encoder + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''' )[0].split(lora_prefix_unet + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_lowerCamelCase : Tuple = parser.parse_args()
_lowerCamelCase : List[Any] = args.base_model_path
_lowerCamelCase : Optional[int] = args.checkpoint_path
_lowerCamelCase : Dict = args.dump_path
_lowerCamelCase : Optional[Any] = args.lora_prefix_unet
_lowerCamelCase : Optional[int] = args.lora_prefix_text_encoder
_lowerCamelCase : List[Any] = args.alpha
_lowerCamelCase : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_lowerCamelCase : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
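# Standalone illustration of the per-layer update performed in convert():
# a LoRA pair factorises the weight delta as up @ down, scaled by alpha,
# and is merged in place. Shapes here are arbitrary.
w0 = torch.zeros(4, 4)
weight_up, weight_down = torch.randn(4, 2), torch.randn(2, 4)
w0 += 0.75 * torch.mm(weight_up, weight_down)  # W = W0 + alpha * deltaW
assert w0.shape == (4, 4)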
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a ( DiffusionPipeline ):
__lowerCAmelCase : Optional[Any] = ["""vqvae"""]
def __init__( self :Optional[int] ,__lowercase :AutoencoderKL ,__lowercase :UNetaDConditionModel ,__lowercase :Mel ,__lowercase :Union[DDIMScheduler, DDPMScheduler] ,):
super().__init__()
self.register_modules(unet=UpperCAmelCase__ ,scheduler=UpperCAmelCase__ ,mel=UpperCAmelCase__ ,vqvae=UpperCAmelCase__ )
def __lowerCamelCase ( self :List[Any] ):
return 5_0 if isinstance(self.scheduler ,UpperCAmelCase__ ) else 1_0_0_0
@torch.no_grad()
def __call__( self :Tuple ,__lowercase :int = 1 ,__lowercase :str = None ,__lowercase :np.ndarray = None ,__lowercase :int = 0 ,__lowercase :int = 0 ,__lowercase :int = None ,__lowercase :torch.Generator = None ,__lowercase :float = 0 ,__lowercase :float = 0 ,__lowercase :torch.Generator = None ,__lowercase :float = 0 ,__lowercase :torch.Tensor = None ,__lowercase :torch.Tensor = None ,__lowercase :Union[str, Any]=True ,):
snake_case__ : Union[str, Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase__ )
snake_case__ : List[str] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
snake_case__ : str = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
snake_case__ : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=UpperCAmelCase__ ,device=self.device ,)
snake_case__ : List[Any] = noise
snake_case__ : int = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase__ ,UpperCAmelCase__ )
snake_case__ : Optional[Any] = self.mel.audio_slice_to_image(UpperCAmelCase__ )
snake_case__ : Tuple = np.frombuffer(input_image.tobytes() ,dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
snake_case__ : Optional[int] = (input_image / 2_5_5) * 2 - 1
snake_case__ : List[str] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
snake_case__ : int = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase__ ,0 ) ).latent_dist.sample(
generator=UpperCAmelCase__ )[0]
snake_case__ : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
snake_case__ : int = self.scheduler.add_noise(UpperCAmelCase__ ,UpperCAmelCase__ ,self.scheduler.timesteps[start_step - 1] )
snake_case__ : Any = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
snake_case__ : int = int(mask_start_secs * pixels_per_second )
snake_case__ : Optional[Any] = int(mask_end_secs * pixels_per_second )
snake_case__ : Optional[Any] = self.scheduler.add_noise(UpperCAmelCase__ ,UpperCAmelCase__ ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,UpperCAmelCase__ ):
snake_case__ : Any = self.unet(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )['''sample''']
else:
snake_case__ : str = self.unet(UpperCAmelCase__ ,UpperCAmelCase__ )['''sample''']
if isinstance(self.scheduler ,UpperCAmelCase__ ):
snake_case__ : Dict = self.scheduler.step(
model_output=UpperCAmelCase__ ,timestep=UpperCAmelCase__ ,sample=UpperCAmelCase__ ,eta=UpperCAmelCase__ ,generator=UpperCAmelCase__ ,)['''prev_sample''']
else:
snake_case__ : List[str] = self.scheduler.step(
model_output=UpperCAmelCase__ ,timestep=UpperCAmelCase__ ,sample=UpperCAmelCase__ ,generator=UpperCAmelCase__ ,)['''prev_sample''']
if mask is not None:
if mask_start > 0:
snake_case__ : List[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
snake_case__ : Optional[int] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
snake_case__ : Any = 1 / self.vqvae.config.scaling_factor * images
snake_case__ : Any = self.vqvae.decode(UpperCAmelCase__ )['''sample''']
snake_case__ : List[str] = (images / 2 + 0.5).clamp(0 ,1 )
snake_case__ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
snake_case__ : Tuple = (images * 2_5_5).round().astype('''uint8''' )
snake_case__ : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase__ ,mode='''RGB''' ).convert('''L''' ) for _ in images) )
snake_case__ : Dict = [self.mel.image_to_audio(UpperCAmelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase__ )[:, np.newaxis, :] ) ,**ImagePipelineOutput(UpperCAmelCase__ ) )
@torch.no_grad()
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Image.Image] ,__lowercase :int = 5_0 ):
assert isinstance(self.scheduler ,UpperCAmelCase__ )
self.scheduler.set_timesteps(UpperCAmelCase__ )
snake_case__ : int = np.array(
[np.frombuffer(image.tobytes() ,dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
snake_case__ : str = (sample / 2_5_5) * 2 - 1
snake_case__ : int = torch.Tensor(UpperCAmelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
snake_case__ : Union[str, Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
snake_case__ : Tuple = self.scheduler.alphas_cumprod[t]
snake_case__ : Any = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
snake_case__ : Dict = 1 - alpha_prod_t
snake_case__ : List[Any] = self.unet(UpperCAmelCase__ ,UpperCAmelCase__ )['''sample''']
snake_case__ : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
snake_case__ : Tuple = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
snake_case__ : Optional[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( __lowercase :torch.Tensor ,__lowercase :torch.Tensor ,__lowercase :float ):
snake_case__ : Optional[int] = acos(torch.dot(torch.flatten(UpperCAmelCase__ ) ,torch.flatten(UpperCAmelCase__ ) ) / torch.norm(UpperCAmelCase__ ) / torch.norm(UpperCAmelCase__ ) )
return sin((1 - alpha) * theta ) * xa / sin(UpperCAmelCase__ ) + sin(alpha * theta ) * xa / sin(UpperCAmelCase__ )
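# Standalone sketch of the spherical interpolation (slerp) used by the static
# method above: interpolate along the great circle between two flattened tensors.
def slerp_sketch(xa: torch.Tensor, xb: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / (torch.norm(xa) * torch.norm(xb)))
    return (sin((1 - alpha) * theta) * xa + sin(alpha * theta) * xb) / sin(theta)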
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCamelCase : Any = """
import os
"""
_lowerCamelCase : Optional[int] = """
def foo():
import os
return False
"""
_lowerCamelCase : List[Any] = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
_lowerCamelCase : List[Any] = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
_lowerCamelCase : Union[str, Any] = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
_lowerCamelCase : List[Any] = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
_lowerCamelCase : List[Any] = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
_lowerCamelCase : str = """
import os
try:
import bar
except:
raise ValueError()
"""
_lowerCamelCase : Optional[Any] = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
_lowerCamelCase : Any = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing(tmp_path , case) -> None:
    """simple docstring"""
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A : str = {
"""configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
"""processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 57 |
def nor_gate ( input_a , input_b ) -> int:
    """simple docstring"""
    return int(input_a == input_b == 0 )
def main ( ) -> None:
"""simple docstring"""
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
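# Sanity check (a small addition): NOR is the negation of OR, so nor_gate(a, b)
# must equal int(not (a or b)) for all 0/1 inputs.
assert all(nor_gate(a , b ) == int(not (a or b) ) for a in (0, 1) for b in (0, 1))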
| 14 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_clap"""] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
A_ : str = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCamelCase : Any = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
_lowerCamelCase : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
'''simple docstring'''
A__ = get_test_to_tester_mapping(UpperCAmelCase__)
A__ = get_test_to_tester_mapping(UpperCAmelCase__)
A__ = {'''BertModelTest''': '''BertModelTester'''}
A__ = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
'''simple docstring'''
A__ = get_model_to_test_mapping(UpperCAmelCase__)
A__ = get_model_to_test_mapping(UpperCAmelCase__)
A__ = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
A__ = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
'''simple docstring'''
A__ = get_model_to_tester_mapping(UpperCAmelCase__)
A__ = get_model_to_tester_mapping(UpperCAmelCase__)
A__ = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
A__ = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
| 14 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCAmelCase ( vectors , noofclusters ):
    """simple docstring"""
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64" , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float" , [dim] )
        vb = tf.placeholder("float" , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float" , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
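# Minimal usage sketch (a small addition; assumes a TensorFlow 1.x environment,
# since the routine above relies on tf.Session/tf.placeholder, and keeps the
# dump's obfuscated function name `lowerCAmelCase`): two well-separated blobs
# should come back as two clusters.
if __name__ == "__main__":
    demo_points = [array([1.0, 1.0] ), array([1.2, 0.8] ), array([8.0, 8.0] ), array([7.9, 8.3] )]
    demo_centroids , demo_assignments = lowerCAmelCase(demo_points , 2 )
    print("centroids:" , demo_centroids )
    print("assignments:" , demo_assignments )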
| 169 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = encoder_stride
A__ = num_attention_outputs
A__ = embed_dim
A__ = embed_dim + 1
A__ = resolution
A__ = depths
A__ = hidden_sizes
A__ = dim
A__ = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict:
'''simple docstring'''
A__ = TFEfficientFormerModel(config=UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
A__ = 1
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
'''simple docstring'''
A__ = TFEfficientFormerModelTester(self)
A__ = ConfigTester(
self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict):
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
if hasattr(self.model_tester , '''encoder_seq_length'''):
A__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
A__ = seq_length * self.model_tester.chunk_length
else:
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
A__ = outputs.decoder_hidden_states
                self.assertIsInstance(UpperCAmelCase__ , (list, tuple))
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int:
'''simple docstring'''
A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def SCREAMING_SNAKE_CASE ( self : str) ->str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
A__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
A__ = model_class(UpperCAmelCase__)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
A__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
A__ = model(UpperCAmelCase__)
self.assertTrue(outputs_dict is not None)
def prepare_img ( ) -> Any:
"""simple docstring"""
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.0555, 0.4825, -0.0852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.1312, 0.4353, -1.0499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
| 14 | 0 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys (s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = r""".*/layers_(\d+)"""
        new_key = key
        if re.match(layer_to_block_of_layer , new_key ):
            new_key = re.sub(r"""layers_(\d+)""" , r"""block/\1/layer""" , new_key )
        layer_to_block_of_layer = r"""(encoder|decoder)\/"""
        if re.match(layer_to_block_of_layer , new_key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"""/mlp/""" , r"""/1/mlp/""" , new_key )
                new_key = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/1/layer_norm/""" , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(r"""/mlp/""" , r"""/2/mlp/""" , new_key )
                new_key = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/2/layer_norm/""" , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowerCAmelCase = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowerCAmelCase = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace('''expert/''' , f"""experts/expert_{idx}/""" )] = expert_weights[idx]
                print(f"""{key} -> {key.replace('expert/' , f'experts/expert_{idx}/')}""" )
            s_dict.pop(key )
return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config (gin_file , num_experts ):
    import regex as re
    with open(gin_file , """r""" ) as f:
        raw_gin = f.read()
    regex_match = re.findall(r"""(.*) = ([0-9.]*)""" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if """.""" in value else int(value )
    activation = re.findall(r"""(.*activations) = \(\'(.*)\',\)""" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING["""dense.MlpBlock.activations"""]] = str(activation[1] )
    args["""num_experts"""] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch (flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params["""target"""]
    flax_params = flatten_dict(flax_params , sep="""/""" )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep="""/""" )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
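# Example invocation (hypothetical paths and script name; the flags are exactly
# the ones defined by the argparse block above):
#   python convert_switch_transformers_flax_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model_config.gin \
#       --pytorch_dump_folder_path /path/to/output \
#       --num_experts 8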
| 301 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( value , weight , capacity ) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value = 0
    fractions = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
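# Worked example (a small addition using the restored function above): values
# (60, 100, 120) with weights (10, 20, 30) at capacity 50 yield the classic
# fractional-knapsack optimum of 240, taking items 1 and 2 whole plus 2/3 of item 3.
assert SCREAMING_SNAKE_CASE([60, 100, 120] , [10, 20, 30] , 50 ) == (240.0, [1, 1, 2 / 3])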
| 14 | 0 |
MOD_ADLER = 65_521
def lowerCamelCase__ ( plain_text : str ) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
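# Worked example (a small addition): the Adler-32 checksum of "Wikipedia" is
# the textbook value 0x11E60398 — the running sum a ends at 0x398 and the
# second-order sum b at 0x11E6, packed as (b << 16) | a.
assert lowerCamelCase__('''Wikipedia''' ) == 0x11E60398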
| 122 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results ( result , args ):
    """simple docstring"""
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('''wer''' )
    cer = load_metric('''cer''' )
    # compute metrics
    wer_result = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    cer_result = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str )
    with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""
        with open(pred_file , '''w''' ) as p, open(target_file , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"""{i}""" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"""{i}""" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(write_to_file , with_indices=True )
def normalize_text ( text ) -> str:
    """simple docstring"""
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def main ( args ):
    """simple docstring"""
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('''audio''' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['''prediction'''] = prediction['''text''']
        batch['''target'''] = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
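# Example invocation (hypothetical script name, model and dataset IDs; the
# flags are the ones defined by the argparse block above):
#   python eval.py \
#       --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en --split test \
#       --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs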
| 14 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput ( BaseOutput ):
    predicted_image_embedding : torch.FloatTensor
class PriorTransformer ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , _lowerCamelCase = 3_2 , _lowerCamelCase = 6_4 , _lowerCamelCase = 2_0 , _lowerCamelCase = 7_6_8 , _lowerCamelCase=7_7 , _lowerCamelCase=4 , _lowerCamelCase = 0.0 , _lowerCamelCase = "silu" , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "linear" , _lowerCamelCase = "prd" , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ):
super().__init__()
lowercase = num_attention_heads
lowercase = attention_head_dim
lowercase = num_attention_heads * attention_head_dim
lowercase = additional_embeddings
lowercase = time_embed_dim or inner_dim
lowercase = embedding_proj_dim or embedding_dim
lowercase = clip_embed_dim or embedding_dim
lowercase = Timesteps(UpperCAmelCase__ , UpperCAmelCase__ , 0 )
lowercase = TimestepEmbedding(UpperCAmelCase__ , UpperCAmelCase__ , out_dim=UpperCAmelCase__ , act_fn=UpperCAmelCase__ )
lowercase = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
if embedding_proj_norm_type is None:
lowercase = None
elif embedding_proj_norm_type == "layer":
lowercase = nn.LayerNorm(UpperCAmelCase__ )
else:
raise ValueError(F'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
lowercase = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
if encoder_hid_proj_type is None:
lowercase = None
elif encoder_hid_proj_type == "linear":
lowercase = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
else:
raise ValueError(F'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
lowercase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCAmelCase__ ) )
if added_emb_type == "prd":
lowercase = nn.Parameter(torch.zeros(1 , 1 , UpperCAmelCase__ ) )
elif added_emb_type is None:
lowercase = None
else:
raise ValueError(
F'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
lowercase = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , dropout=UpperCAmelCase__ , activation_fn='gelu' , attention_bias=UpperCAmelCase__ , )
for d in range(UpperCAmelCase__ )
] )
if norm_in_type == "layer":
lowercase = nn.LayerNorm(UpperCAmelCase__ )
elif norm_in_type is None:
lowercase = None
else:
raise ValueError(F'Unsupported norm_in_type: {norm_in_type}.' )
lowercase = nn.LayerNorm(UpperCAmelCase__ )
lowercase = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
lowercase = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCAmelCase__ , persistent=UpperCAmelCase__ )
lowercase = nn.Parameter(torch.zeros(1 , UpperCAmelCase__ ) )
lowercase = nn.Parameter(torch.zeros(1 , UpperCAmelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self ):
lowercase = {}
def fn_recursive_add_processors(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if hasattr(UpperCAmelCase__ , 'set_processor' ):
lowercase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , UpperCAmelCase__ , UpperCAmelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return processors
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = len(self.attn_processors.keys() )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(UpperCAmelCase__ )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if hasattr(UpperCAmelCase__ , 'set_processor' ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
module.set_processor(UpperCAmelCase__ )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , UpperCAmelCase__ , UpperCAmelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCamelCase_ ( self ):
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , ):
lowercase = hidden_states.shape[0]
lowercase = timestep
if not torch.is_tensor(UpperCAmelCase__ ):
lowercase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCAmelCase__ ) and len(timesteps.shape ) == 0:
lowercase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase = timesteps * torch.ones(UpperCAmelCase__ , dtype=timesteps.dtype , device=timesteps.device )
lowercase = self.time_proj(UpperCAmelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowercase = timesteps_projected.to(dtype=self.dtype )
lowercase = self.time_embedding(UpperCAmelCase__ )
if self.embedding_proj_norm is not None:
lowercase = self.embedding_proj_norm(UpperCAmelCase__ )
lowercase = self.embedding_proj(UpperCAmelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowercase = self.encoder_hidden_states_proj(UpperCAmelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowercase = self.proj_in(UpperCAmelCase__ )
lowercase = self.positional_embedding.to(hidden_states.dtype )
lowercase = []
lowercase = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCAmelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowercase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowercase = hidden_states[:, None, :]
lowercase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowercase = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCAmelCase__ , -1 , -1 )
additional_embeds.append(UpperCAmelCase__ )
lowercase = torch.cat(
UpperCAmelCase__ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowercase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowercase = F.pad(
UpperCAmelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
lowercase = hidden_states + positional_embeddings
if attention_mask is not None:
lowercase = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
lowercase = F.pad(UpperCAmelCase__ , (0, self.additional_embeddings) , value=0.0 )
lowercase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowercase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
lowercase = self.norm_in(UpperCAmelCase__ )
for block in self.transformer_blocks:
lowercase = block(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowercase = self.norm_out(UpperCAmelCase__ )
if self.prd_embedding is not None:
lowercase = hidden_states[:, -1]
else:
lowercase = hidden_states[:, additional_embeddings_len:]
lowercase = self.proj_to_clip_embeddings(UpperCAmelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCAmelCase__ )
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 220 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blip"""] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blip"""] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowercase__ ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self : Dict , embedding_dim : int = 768 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to ( self : Union[str, Any] , torch_device : Optional[Union[str, torch.device]] = None , torch_dtype : Optional[torch.dtype] = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale ( self : Tuple , embeds : torch.Tensor ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale ( self : str , embeds : torch.Tensor ):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 144 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_msn"""] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = ["""OwlViTFeatureExtractor"""]
__UpperCamelCase : str = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_owlvit"""] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 146 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
A__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A__ = 1
if upper_limit > 0:
A__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_lowerCamelCase : List[Any] = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
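# Quick check (a small addition) against the known start of the sequence,
# C(0)..C(5) = 1, 1, 2, 5, 14, 42:
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]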
| 14 | 0 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _A ( BaseTokenizer ):
    def __init__( self , replacement = "▁" , add_prefix_space = True , unk_token = "<unk>" , eos_token = "</s>" , pad_token = "<pad>" , ) -> Optional[Any]:
        '''simple docstring'''
        self.special_tokens = {
            """pad""": {"""id""": 0, """token""": pad_token},
            """eos""": {"""id""": 1, """token""": eos_token},
            """unk""": {"""id""": 2, """token""": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["""id"""]] = token_dict["""token"""]
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=f'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
        parameters = {
            """model""": """SentencePieceUnigram""",
            """replacement""": replacement,
            """add_prefix_space""": add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train ( self , files , vocab_size = 8_000 , show_progress = True , ):
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()
    def train_from_iterator ( self , iterator , vocab_size = 8_000 , show_progress = True , ):
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id ( self ):
        '''simple docstring'''
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["""model"""]["""unk_id"""] = self.special_tokens["""unk"""]["""id"""]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
| 254 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main ( args ):
    """simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('''/''' )
    target_model_path = args.target_model_path
    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    model = torch.load(os.path.join(model_name_or_path , '''pytorch_model.bin''' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""" )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                l , r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            else:
                raise ValueError('''Unknown pruning method''' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
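# Illustrative smoke test for the entry point above; the checkpoint directory
# is a hypothetical path, and the fields mirror the CLI flags:
#
#     from argparse import Namespace
#     main(Namespace(
#         pruning_method="topK",
#         threshold=0.1,
#         model_name_or_path="./squad_topk_model",
#         target_model_path=None,
#     ))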
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
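# Hedged example of how these criteria are typically consumed: `generate`
# accepts a `stopping_criteria` list (model name and prompt are illustrative):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     ids = tok("Hello", return_tensors="pt").input_ids
#     out = model.generate(
#         ids, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#     )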
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of the given string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
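if __name__ == "__main__":
    # Usage sketch: sanity-check the function above against the reference
    # implementation in the standard library.
    import zlib

    assert adler32("abc") == zlib.adler32(b"abc")
    print(hex(adler32("abc")))  # 0x24d0127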
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map every `input_ids` sample back to its image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
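# Hedged usage sketch for the processor above; the checkpoint is the public
# LayoutLMv3 base model, the image path is illustrative, and the default
# apply_ocr=True path requires pytesseract to be installed:
#
#     from PIL import Image
#     from transformers import LayoutLMv3Processor
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")
#     print(encoding.keys())  # input_ids, bbox, attention_mask, pixel_values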
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
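# Hedged end-to-end sketch for the reader tokenizer above, mirroring the
# published DPR reader example (checkpoint is the public model; inputs are
# toy strings):
#
#     from transformers import DPRReader, DPRReaderTokenizerFast
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded)
#     spans = tokenizer.decode_best_spans(encoded, outputs)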
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = y
for step in range(lowercase_ ): # noqa: B007
__lowerCAmelCase = a * a - b * b + x
__lowerCAmelCase = 2 * a * b + y
__lowerCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) )
def _lowerCamelCase ( _UpperCamelCase = 800 , _UpperCamelCase = 600 , _UpperCamelCase = -0.6 , _UpperCamelCase = 0 , _UpperCamelCase = 3.2 , _UpperCamelCase = 50 , _UpperCamelCase = True , ):
'''simple docstring'''
__lowerCAmelCase = Image.new("RGB" , (image_width, image_height) )
__lowerCAmelCase = img.load()
# loop through the image-coordinates
for image_x in range(lowercase_ ):
for image_y in range(lowercase_ ):
# determine the figure-coordinates based on the image-coordinates
__lowerCAmelCase = figure_width / image_width * image_height
__lowerCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
__lowerCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
__lowerCAmelCase = get_distance(lowercase_ , lowercase_ , lowercase_ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__lowerCAmelCase = get_color_coded_rgb(lowercase_ )
else:
__lowerCAmelCase = get_black_and_white_rgb(lowercase_ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
A : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate an encoder-decoder config from pre-trained encoder and decoder configurations."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance, including the nested encoder and decoder configs, to a dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
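# Hedged usage sketch for the config class above (plain BERT configs are
# illustrative choices for the two halves):
#
#     from transformers import BertConfig
#     enc = BertConfig()
#     dec = BertConfig()
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#     print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True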
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
def longest_distance(graph):
    """Print the longest distance in a DAG using a topological (Kahn's) ordering."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    # compute the indegree of every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the Russian-peasant (shift-and-add) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Shift-and-add multiplication of a and b, reduced modulo `modulus` at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
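if __name__ == "__main__":
    # Minimal self-checks for the two helpers above (values verified by hand).
    assert binary_multiply(3, 9) == 27
    assert binary_mod_multiply(2, 3, 5) == (2 * 3) % 5
    print("binary multiplication checks passed")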
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
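# Hedged usage of this packaged module through the public `datasets` API
# (the file paths are illustrative):
#
#     from datasets import load_dataset
#     ds = load_dataset("json", data_files="my_data.json")                 # one object per line
#     ds = load_dataset("json", data_files="nested.json", field="data")    # records nested under "data"
#     print(ds["train"][0])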
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during training
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class CreateStudentModelTestCase(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
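# Hedged direct use of the helper exercised above; the output directory is a
# temporary path and the checkpoint is the tiny test model:
#
#     student, *_ = create_student_by_copying_alternating_layers(
#         "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1
#     )
#     print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1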
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
_A = ["""gpt2"""]
_A = """gpt2"""
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # pad_token_id must be set for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 122 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing with the shortest-edge rule."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
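
# Worked example of the shortest-edge rule above (an illustrative sketch mirroring
# `get_expected_values`, not a library API): a 400-wide by 600-high image with
# shortest_edge=18 resizes to height 27, width 18, preserving the aspect ratio.
def shortest_edge_resize_example(w: int, h: int, shortest_edge: int = 18) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge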
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 14 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree():
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: the queue only empties after a return")
def pre_order(node: TreeNode) -> None:
    """Root -> Left -> Right."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Left -> Root -> Right."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Left -> Right -> Root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Level-by-level traversal, printing a newline after each level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # consume every node of the current level
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] = "" , __snake_case : Any=50 , __snake_case : Optional[int]="*" ):
'''simple docstring'''
if not s:
return "\n" + width * char
lowercase , lowercase = divmod(width - len(lowercase_ ) - 2 , 2 )
return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 5_0 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
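
# Non-interactive usage sketch (added for illustration): the traversals above also work
# on a tree built directly in code, without the interactive build_tree().
def _demo_tree() -> TreeNode:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    # pre_order(root) prints 1,2,4,5,3, and in_order(root) prints 4,2,5,1,3,
    return root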
| 220 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance with numpy: sqrt(sum((v1 - v2) ** 2))."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Pure-Python Euclidean distance."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
        """Time the numpy and pure-Python implementations against each other."""
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
benchmark()
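
    # Quick sanity check (added for illustration): both implementations agree on a
    # 3-4-5 right triangle, where the distance from (0, 0) to (3, 4) is 5.
    assert euclidean_distance((0, 0), (3, 4)) == euclidean_distance_no_np((0, 0), (3, 4)) == 5.0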
| 14 | 0 |
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number (e.g. 28)."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
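
    # Worked example (illustrative): "AB" -> (ord("A") - 64) * 26**1 + (ord("B") - 64) * 26**0
    #                                      = 1 * 26 + 2 = 28
    print(excel_title_to_column("AB"))  # 28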
| 144 |
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''SpeechT5FeatureExtractor'''
UpperCAmelCase__ = '''SpeechT5Tokenizer'''
def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase__ , UpperCAmelCase__)
def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]:
'''simple docstring'''
A__ = kwargs.pop('''audio''' , UpperCAmelCase__)
A__ = kwargs.pop('''text''' , UpperCAmelCase__)
A__ = kwargs.pop('''text_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        """Collate audio/text inputs and their targets into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily swap in the mel-bin count so the feature extractor pads spectrograms
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
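
# Minimal usage sketch (illustrative; the checkpoint name is an assumption): the
# processor routes `text` to the tokenizer and `audio`/`audio_target` to the feature
# extractor, so a text-to-speech input/target pair can be prepared in one call.
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   batch = processor(text="Hello world", audio_target=waveform, sampling_rate=16000)
#   # batch["input_ids"] feeds the encoder, batch["labels"] holds the target spectrogram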
| 14 | 0 |
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
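
    # Why the encoded form is longer (illustrative note): Base85 packs every 4 bytes of
    # input into 5 ASCII characters, so the 12-byte "Hello World!" becomes 15 characters.
    assert len(base85_encode("Hello World!")) == 15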
| 146 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
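
# Usage sketch (illustrative; values are arbitrary examples, not recommended settings):
# a GitConfig can be built around a customized vision tower.
#
#   vision_config = GitVisionConfig(image_size=384, patch_size=32)
#   config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=4)
#   assert config.vision_config.image_size == 384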
| 14 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 254 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown on a Google Scholar result page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 14 | 0 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
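
# Usage sketch (illustrative; argument values are assumptions for the example):
# building the config from an existing backbone config via the classmethod above.
#
#   from transformers import SwinConfig
#   backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = Mask2FormerConfig.from_backbone_config(backbone, num_queries=200)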
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
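
# What the in-place update above computes, in isolation (an illustrative sketch with
# assumed tensor shapes): LoRA stores a low-rank factorization, and merging adds
# alpha * (up @ down) onto the frozen weight, i.e. W' = W + alpha * B A.
def merge_lora_pair_example(weight, up, down, alpha):
    import torch

    return weight + alpha * torch.mm(up, down)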
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
A__ = """src/transformers"""
# Matches is_xxx_available()
A__ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
A__ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
A__ = re.compile(r'''\s+\"\S*\":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
A__ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
A__ = re.compile(r'''^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
A__ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
A__ = re.compile('''^\s+\"([^\"]+)\",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
A__ = re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
A__ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
A__ = re.compile(r'''^\s*try:''')
# Catches a line with else:
A__ = re.compile(r'''^\s*else:''')
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse the `_import_structure` and `TYPE_CHECKING` objects defined in an init, per backend."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the objects found in `_import_structure` against those under `TYPE_CHECKING`."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check every __init__.py under the transformers source tree and collect inconsistencies."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules():
    """Check that every submodule is registered in the main init's `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
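
    # Quick illustration (added example) of what `find_backend` extracts from an init
    # line: the sorted backend names joined by "_and_".
    assert find_backend("    if not is_torch_available():") == "torch"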
| 230 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCamelCase : Any = """
import os
"""
_lowerCamelCase : Optional[int] = """
def foo():
import os
return False
"""
_lowerCamelCase : List[Any] = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
_lowerCamelCase : List[Any] = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
_lowerCamelCase : Union[str, Any] = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
_lowerCamelCase : List[Any] = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
_lowerCamelCase : List[Any] = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
_lowerCamelCase : str = """
import os
try:
import bar
except:
raise ValueError()
"""
_lowerCamelCase : Optional[Any] = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
_lowerCamelCase : Any = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
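
# Added illustration: imports guarded by try/except are deliberately excluded, which is
# why every case above, however nested its try block, parses to just ["os"].
@pytest.mark.parametrize("case", [TOP_LEVEL_TRY_IMPORT, TRY_IMPORT_IN_FUNCTION])
def test_guarded_imports_are_skipped(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    assert "bar" not in get_imports(tmp_file_path)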
| 14 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    '''simple docstring'''
    def __call__( self , questions , titles=None , texts=None , padding=False , truncation=False , max_length=None , return_tensors=None , return_attention_mask=None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), f"There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts."
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["input_ids"]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DPRReaderTokenizer
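# A minimal usage sketch, assuming the pretrained reader files mapped above are
# available; the reader forward pass is only indicated, since real start/end/relevance
# logits require a trained DPRReader model.
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by Haddaway.",
#       return_tensors="pt",
#   )
#   # outputs = reader(**encoded_inputs)  # a DPRReaderOutput of start/end/relevance logits
#   # best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs, num_spans=1)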
| 57 |
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0 )
def main() -> None:
    """simple docstring"""
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 14 | 0 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    max_seq_length: int = field(
        default=1_2_8 ,metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    def __post_init__(self) -> None:
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args: GlueDataTrainingArguments , tokenizer: PreTrainedTokenizerBase , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , cache_dir: Optional[str] = None , ) -> None:
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
            else:
                logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
    def __len__( self ) -> int:
        return len(self.features )
    def __getitem__( self , i ) -> InputFeatures:
        return self.features[i]
    def get_labels( self ) -> List[str]:
        return self.label_list
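# A minimal sketch of how these pieces are typically wired together; the tokenizer
# checkpoint and data_dir below are illustrative values, not taken from this module.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#   print(len(train_dataset), train_dataset.get_labels())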
| 165 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
BLIP_TEST_FILE = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class GetTestInfoTester(unittest.TestCase):
'''simple docstring'''
    def test_get_test_to_tester_mapping(self) -> None:
        '''simple docstring'''
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {'''BertModelTest''': '''BertModelTester'''}
        expected_blip_mapping = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping) , expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping) , expected_blip_mapping)
    def test_get_model_to_test_mapping(self) -> None:
        '''simple docstring'''
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping) , expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping) , expected_blip_mapping)
    def test_get_model_to_tester_mapping(self) -> None:
        '''simple docstring'''
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping) , expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping) , expected_blip_mapping)
| 14 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/reformer-crime-and-punishment""": (
            """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/reformer-crime-and-punishment""": 5_2_4_2_8_8,
}
class ReformerTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ) -> str:
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
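# A short usage sketch, assuming the pretrained files referenced above can be fetched:
#
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Crime and Punishment")["input_ids"]
#   print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids)))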
| 169 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size: int = 13 , image_size: int = 64 , patch_size: int = 2 , embed_dim: int = 3 , num_channels: int = 3 , is_training: bool = True , use_labels: bool = True , hidden_size: int = 128 , hidden_sizes=[16, 32, 64, 128] , num_hidden_layers: int = 7 , num_attention_heads: int = 4 , intermediate_size: int = 37 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , type_sequence_label_size: int = 10 , initializer_range: float = 0.02 , encoder_stride: int = 2 , num_attention_outputs: int = 1 , dim: int = 128 , depths: List[int] = [2, 2, 2, 2] , resolution: int = 2 , mlp_expansion_ratio: int = 2 , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values , training=False)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values , labels=labels , training=False)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self) -> None:
        '''simple docstring'''
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37)
    def test_config(self) -> None:
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
    def test_inputs_embeds(self) -> None:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
    def test_model_common_attributes(self) -> None:
'''simple docstring'''
pass
    def test_forward_signature(self) -> None:
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_hidden_states_output(self) -> None:
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class) , training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states) , expected_num_layers)
            if hasattr(self.model_tester , '''encoder_seq_length'''):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states , (list, tuple))
                self.assertEqual(len(hidden_states) , expected_num_layers)
                seq_len = getattr(self.model_tester , '''seq_length''' , None)
                decoder_seq_length = getattr(self.model_tester , '''decoder_seq_length''' , seq_len)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
    def test_for_masked_image_modeling(self) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self) -> None:
        '''simple docstring'''
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self) -> None:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , '''seq_length''' , None)
        encoder_seq_length = getattr(self.model_tester , '''encoder_seq_length''' , seq_len)
        encoder_key_length = getattr(self.model_tester , '''key_length''' , encoder_seq_length)
        chunk_length = getattr(self.model_tester , '''chunk_length''' , None)
        if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class) , training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions) , self.model_tester.num_attention_outputs)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class) , training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions) , self.model_tester.num_attention_outputs)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def test_compile_tf_model(self) -> None:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img() -> Any:
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return (
            EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self) -> None:
        '''simple docstring'''
        model = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs , training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
    @slow
    def test_inference_image_classification_head_with_teacher(self) -> None:
        '''simple docstring'''
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            '''snap-research/efficientformer-l1-300''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs , training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
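# A minimal inference sketch mirroring the integration tests above; it assumes TF and
# the snap-research checkpoint are reachable, and uses prepare_img() as defined earlier.
#
#   image_processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
#   model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
#   inputs = image_processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs, training=False).logits
#   print(int(tf.math.argmax(logits, axis=-1)[0]))  # predicted ImageNet class id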
| 14 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 301 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
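    # A toy instance (made-up values/weights) showing the greedy fractions: the
    # value/weight ratios are 6, 5 and 4, so items 0 and 1 are taken whole and
    # only 2/3 of item 2 fits into the remaining capacity.
    toy_value = [60, 100, 120]
    toy_weight = [10, 20, 30]
    print(fractional_knapsack(toy_value, toy_weight, 50))  # (240.0, [1, 1, 0.6666666666666666])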
| 14 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1_024,
"""1B5""": 2_048,
"""3B""": 2_560,
"""7B""": 4_096,
"""14B""": 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith("""emb.""" ):
            name = name.replace("""emb.""" , """embeddings.""" )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("""blocks.0.ln0""" ):
            name = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
        # att -> attention
        name = re.sub(r"""blocks\.(\d+)\.att""" , r"""blocks.\1.attention""" , name )
        # ffn -> feed_forward
        name = re.sub(r"""blocks\.(\d+)\.ffn""" , r"""blocks.\1.feed_forward""" , name )
        # time_mix_k -> time_mix_key
        if name.endswith(""".time_mix_k""" ):
            name = name.replace(""".time_mix_k""" , """.time_mix_key""" )
        # time_mix_v -> time_mix_value
        if name.endswith(""".time_mix_v""" ):
            name = name.replace(""".time_mix_v""" , """.time_mix_value""" )
        # time_mix_r -> time_mix_receptance
        if name.endswith(""".time_mix_r""" ):
            name = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
        if name != "head.weight":
            name = """rwkv.""" + name
        state_dict[name] = weight
    return state_dict
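# A quick sanity check of the renaming scheme on toy tensors (values are arbitrary):
#
#   >>> sd = {"emb.weight": torch.zeros(1), "blocks.0.att.time_mix_k": torch.zeros(1)}
#   >>> sorted(convert_state_dict(sd).keys())
#   ['rwkv.blocks.0.attention.time_mix_key', 'rwkv.embeddings.weight']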
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
        vocab_size = 5_0277
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
    if size not in possible_sizes:
        raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location="""cpu""" )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , """w""" , encoding="""utf-8""" ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
            f.write(content )
    # 5. Clean up shards (the files PyTorch saves take the same space as the whole state_dict)
    print(
        """Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.""" )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size="""2GB""" )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
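    # Example invocation (repo and file names are hypothetical, shown only to illustrate the flags):
    #
    #   python convert_rwkv_checkpoint_to_hf.py \
    #       --repo_id BlinkDL/rwkv-4-pile-169m \
    #       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
    #       --output_dir ./rwkv-169m-hf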
| 122 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """simple docstring"""
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('''wer''' )
    cer = load_metric('''cer''' )
    # compute metrics
    wer_result = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    cer_result = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str )
    with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""
        with open(pred_file , '''w''' ) as p, open(target_file , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"""{i}""" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"""{i}""" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(write_to_file , with_indices=True )
def normalize_text(text: str) -> str:
    """simple docstring"""
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
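# For instance, normalize_text("Hello, World!") yields "hello world": the punctuation in
# the regex above is stripped and the text is lower-cased before whitespace is collapsed.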
def main(args):
    """simple docstring"""
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('''audio''' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['''prediction'''] = prediction['''text''']
        batch['''target'''] = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
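    # Example invocation (model and dataset ids are hypothetical, shown only to illustrate the flags):
    #
    #   python eval.py --model_id facebook/wav2vec2-base-960h \
    #       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs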
| 14 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
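# A short usage sketch, assuming the junnyu checkpoints mapped above are reachable:
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   print(tokenizer.tokenize("今天天气非常好。"))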
| 220 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""image_processing_blip"""] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blip"""] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blip"""] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
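# Hedged sketch of the lazy-import pattern used in the module above: attribute
# access triggers the real submodule import, so importing the package stays cheap.
# This is a simplified stand-in for transformers' _LazyModule, not its actual code.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}
    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value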
"""simple docstring"""
from random import randint, random
def construct_highway( number_of_cells : int , frequency : int , initial_speed : int , random_frequency : bool = False , random_speed : bool = False , max_speed : int = 5 , ) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance( highway_now : list , car_index : int ) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update( highway_now : list , probability : float , max_speed : int ) -> list:
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cell before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate( highway : list , number_of_update : int , probability : float , max_speed : int ) -> list:
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 144 |
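# Hedged usage sketch for the cellular-automaton traffic model above: build a
# 50-cell highway with a car every 5 cells at speed 2, then run 10 update steps.
highway = construct_highway(50, frequency=5, initial_speed=2)
history = simulate(highway, number_of_update=10, probability=0.1, max_speed=5)
print(len(history))  # initial state + 10 updates -> 11 rows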
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file( tmp_path ):
    """simple docstring"""
    filename = tmp_path / '''file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file( tmp_path ):
    """simple docstring"""
    filename = tmp_path / '''malformed_file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image( tmp_path , image_file ):
    """simple docstring"""
    filename = tmp_path / '''csv_with_image.csv'''
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """ )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label( tmp_path ):
    """simple docstring"""
    filename = tmp_path / '''csv_with_label.csv'''
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list( tmp_path ):
    """simple docstring"""
    filename = tmp_path / '''csv_with_int_list.csv'''
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv( csv_file , malformed_csv_file , caplog ):
    """simple docstring"""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='''Error tokenizing data''' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image( csv_file_with_image ):
    """simple docstring"""
    with open(csv_file_with_image , encoding='''utf-8''' ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''image''' ).type == Image()()
    generated_content = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label( csv_file_with_label ):
    """simple docstring"""
    with open(csv_file_with_label , encoding='''utf-8''' ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
    generated_content = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def test_csv_convert_int_list( csv_file_with_int_list ):
    """simple docstring"""
    csv = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x: [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
    generated_content = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 146 |
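# Hedged usage sketch of the same ClassLabel cast through the public datasets
# API; "data.csv" is a placeholder for a local file with a `label` column.
from datasets import load_dataset, Features, ClassLabel
features = Features({"label": ClassLabel(names=["good", "bad"])})
ds = load_dataset("csv", data_files="data.csv", features=features, split="train")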
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
A__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A__ = 1
if upper_limit > 0:
A__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 14 | 0 |
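# Hedged sanity check for catalan_numbers above: the first six Catalan numbers.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]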
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _A ( BackboneConfigMixin , PretrainedConfig ):
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 254 |
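# Hedged usage sketch for the config class above (named `_A` here, matching the
# file): with the defaults, the derived hidden size is embed_dim * 2**(stages-1)
# = 64 * 8 = 512.
config = _A()
assert config.hidden_size == 512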
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ) -> None:
    """simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('''/''' )
    target_model_path = args.target_model_path
    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    model = torch.load(os.path.join(model_name_or_path , '''pytorch_model.bin''' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""" )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            else:
                raise ValueError('''Unknown pruning method''' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
_lowerCamelCase : int = parser.parse_args()
main(args)
| 14 | 0 |
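# Hedged sketch of the top-K masking idea used in the pruning script above:
# keep the fraction `threshold` of entries with the largest |score| and zero
# the rest. A standalone illustration, not emmental's TopKBinarizer itself.
import torch

def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    k = max(1, int(threshold * scores.numel()))
    # value of the k-th largest |score|: the (n - k + 1)-th smallest
    cutoff = scores.abs().flatten().kthvalue(scores.numel() - k + 1).values
    return (scores.abs() >= cutoff).to(scores.dtype)

w = torch.randn(4, 4)
mask = topk_mask(w, 0.25)  # keeps roughly 25% of entries
print((mask != 0).sum().item())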
"""simple docstring"""
def nor_gate( input_1 : int, input_2 : int ):
    """simple docstring"""
    return int(input_1 == input_2 == 0 )
def main( ):
    """simple docstring"""
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f'| 0 | 0 | {nor_gate(0, 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0, 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1, 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1, 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 320 |
MOD_ADLER = 65521
def adler32( plain_text : str ) -> int:
    """simple docstring"""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 14 | 0 |
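# Hedged sanity check for adler32 above, using the well-known test vector.
assert adler32("Wikipedia") == 300286872  # 0x11E60398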
from __future__ import annotations
def decrypt_caesar_with_chi_squared( ciphertext , cipher_alphabet = None , frequencies_dict = None , case_sensitive = False , ) -> tuple[int, float, str]:
    """simple docstring"""
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.08_497,
'''b''': 0.01_492,
'''c''': 0.02_202,
'''d''': 0.04_253,
'''e''': 0.11_162,
'''f''': 0.02_228,
'''g''': 0.02_015,
'''h''': 0.06_094,
'''i''': 0.07_546,
'''j''': 0.00_153,
'''k''': 0.01_292,
'''l''': 0.04_025,
'''m''': 0.02_406,
'''n''': 0.06_749,
'''o''': 0.07_507,
'''p''': 0.01_929,
'''q''': 0.00_095,
'''r''': 0.07_587,
'''s''': 0.06_327,
'''t''': 0.09_356,
'''u''': 0.02_758,
'''v''': 0.00_978,
'''w''': 0.02_560,
'''x''': 0.00_150,
'''y''': 0.01_994,
'''z''': 0.00_077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ''''''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 230 |
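# Hedged usage sketch for the decoder above; very short texts can fool the
# chi-squared heuristic, so treat the recovered shift as illustrative only.
ciphertext = "khoor zruog"  # "hello world" shifted by 3
shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared(ciphertext)
print(shift, plaintext)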
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast( BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast( BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
'''simple docstring'''
    def __call__( self , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs , ) ->BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str) else [titles]
        texts = texts if not isinstance(texts , str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions , str) else [questions] * n_passages
        assert len(titles) == len(
            texts), f"""There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."""
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False)['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False)['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors)
    def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ) ->List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages) , reverse=True , key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ) ->List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores , key=lambda x: x[1] , reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast( CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DPRReaderTokenizer
| 14 | 0 |
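# Hedged usage sketch mirroring the transformers DPR docs: encode one question
# against two passages and note the (n_passages, sequence_length) input shape.
# Running this downloads the pretrained tokenizer files.
from transformers import DPRReaderTokenizerFast
tok = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
enc = tok(questions="What is love?",
          titles=["Haddaway", "Song"],
          texts=["'What Is Love' is a song...", "A 1993 eurodance hit..."],
          padding=True, return_tensors="pt")
print(enc["input_ids"].shape)  # torch.Size([2, seq_len])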
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def get_citation( base_url , params ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , "html.parser" )
    div = soup.find("div" , attrs={"class": "gs_ri"} )
    anchors = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
    return anchors[2].get_text()
if __name__ == "__main__":
A : Optional[Any] = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 3_0,
"""pages""": """3979-3990""",
"""year""": 2_0_1_8,
"""hl""": """en""",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 57 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class UpperCamelCase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''encoder-decoder'''
    is_composition = True
    def __init__( self , **kwargs) ->None:
        '''simple docstring'''
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs) ->PretrainedConfig:
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs)
    def to_dict( self) ->dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 14 | 0 |
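# Hedged usage sketch: composing the config above from two sub-configs via the
# public transformers API (EncoderDecoderConfig is the released class this
# mirrors). Running this requires transformers installed.
from transformers import BertConfig, EncoderDecoderConfig
config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention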
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
A_ : Tuple = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class lowerCamelCase (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 165 |
def longest_distance( graph ) -> None:
    """simple docstring"""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 14 | 0 |
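# Hedged sanity note for the run above: the longest path in the sample DAG is
# 0 -> 2 -> 5 -> 6 -> 7 (5 vertices), so the script prints 5.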
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self :Optional[int] , lowerCamelCase :List[Any] , lowerCamelCase :int = 13 , lowerCamelCase :int = 64 , lowerCamelCase :int = 2 , lowerCamelCase :int = 3 , lowerCamelCase :int = 3 , lowerCamelCase :bool = True , lowerCamelCase :bool = True , lowerCamelCase :int = 128 , lowerCamelCase :Optional[Any]=[16, 32, 64, 128] , lowerCamelCase :int = 7 , lowerCamelCase :int = 4 , lowerCamelCase :int = 37 , lowerCamelCase :str = "gelu" , lowerCamelCase :float = 0.1 , lowerCamelCase :float = 0.1 , lowerCamelCase :int = 10 , lowerCamelCase :float = 0.02 , lowerCamelCase :int = 2 , lowerCamelCase :int = 1 , lowerCamelCase :int = 128 , lowerCamelCase :List[int] = [2, 2, 2, 2] , lowerCamelCase :int = 2 , lowerCamelCase :int = 2 , ) -> List[Any]:
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = encoder_stride
UpperCAmelCase__ = num_attention_outputs
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = embed_dim + 1
UpperCAmelCase__ = resolution
UpperCAmelCase__ = depths
UpperCAmelCase__ = hidden_sizes
UpperCAmelCase__ = dim
UpperCAmelCase__ = mlp_expansion_ratio
def UpperCAmelCase_ ( self :List[Any] ) -> str:
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self :int ) -> str:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :Dict , lowerCamelCase :List[str] , lowerCamelCase :Dict ) -> Dict:
UpperCAmelCase__ = TFEfficientFormerModel(config=UpperCAmelCase__ )
UpperCAmelCase__ = model(UpperCAmelCase__ , training=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self :int , lowerCamelCase :Dict , lowerCamelCase :Dict , lowerCamelCase :str ) -> Union[str, Any]:
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = TFEfficientFormerForImageClassification(UpperCAmelCase__ )
UpperCAmelCase__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = TFEfficientFormerForImageClassification(UpperCAmelCase__ )
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self :int ) -> List[str]:
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def UpperCAmelCase_ ( self :Optional[Any] ) -> List[str]:
UpperCAmelCase__ = TFEfficientFormerModelTester(self )
UpperCAmelCase__ = ConfigTester(
self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def UpperCAmelCase_ ( self :int ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def UpperCAmelCase_ ( self :List[str] ) -> Dict:
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def UpperCAmelCase_ ( self :List[Any] ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self :Any ) -> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(UpperCAmelCase__ )
UpperCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def UpperCAmelCase_ ( self :str ) -> Any:
def check_hidden_states_output(lowerCamelCase :Optional[Any] , lowerCamelCase :Any , lowerCamelCase :Dict ):
UpperCAmelCase__ = model_class(UpperCAmelCase__ )
UpperCAmelCase__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) , training=UpperCAmelCase__ )
UpperCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
UpperCAmelCase__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
UpperCAmelCase__ = seq_length * self.model_tester.chunk_length
else:
UpperCAmelCase__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
UpperCAmelCase__ = outputs.decoder_hidden_states
                self.assertIsInstance(UpperCAmelCase__ , (list, tuple) )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
UpperCAmelCase__ = getattr(self.model_tester , "seq_length" , UpperCAmelCase__ )
UpperCAmelCase__ = getattr(self.model_tester , "decoder_seq_length" , UpperCAmelCase__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self :Optional[Any] , lowerCamelCase :Dict , lowerCamelCase :Dict , lowerCamelCase :Dict=False ) -> int:
UpperCAmelCase__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase_ ( self :Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def UpperCAmelCase_ ( self :str ) -> str:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self :Any ) -> Tuple:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self :Tuple ) -> Optional[int]:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def UpperCAmelCase_ ( self :Any ) -> str:
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
UpperCAmelCase__ = getattr(self.model_tester , "seq_length" , UpperCAmelCase__ )
UpperCAmelCase__ = getattr(self.model_tester , "encoder_seq_length" , UpperCAmelCase__ )
UpperCAmelCase__ = getattr(self.model_tester , "key_length" , UpperCAmelCase__ )
UpperCAmelCase__ = getattr(self.model_tester , "chunk_length" , UpperCAmelCase__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
UpperCAmelCase__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(UpperCAmelCase__ )
UpperCAmelCase__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) , training=UpperCAmelCase__ )
UpperCAmelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(UpperCAmelCase__ )
UpperCAmelCase__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) , training=UpperCAmelCase__ )
UpperCAmelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCAmelCase_ ( self :List[str] ) -> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCAmelCase__ = model_class(UpperCAmelCase__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCAmelCase__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCAmelCase__ = model(UpperCAmelCase__ )
self.assertTrue(outputs_dict is not None )
def prepare_img( ):
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class TFEfficientFormerModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self :List[str] ) -> Any:
UpperCAmelCase__ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=UpperCAmelCase__ , return_tensors="tf" )
# forward pass
UpperCAmelCase__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__ )
# verify the logits
UpperCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
UpperCAmelCase__ = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self :Dict ) -> int:
UpperCAmelCase__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=UpperCAmelCase__ , return_tensors="tf" )
# forward pass
UpperCAmelCase__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__ )
# verify the logits
UpperCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
UpperCAmelCase__ = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 169 |
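# Hedged usage sketch: the integration test above in miniature — classify one
# image with the TF EfficientFormer checkpoint (downloads weights when run;
# the image URL is the usual COCO sample used in transformers docs).
import requests
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
outputs = model(**processor(images=image, return_tensors="tf"))
print(outputs.logits.shape)  # (1, 1000)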
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig( datasets.BuilderConfig ):
    '''simple docstring'''
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json( datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info( self) ->datasets.DatasetInfo:
        '''simple docstring'''
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators( self , dl_manager):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files , (str, list, tuple)):
            files = data_files
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files}))
        return splits
    def _cast_table( self , pa_table : pa.Table) ->pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table) , type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema)
        return pa_table
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
# We keep only the field we are interested in
A__ = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase__ , (list, tuple)):
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
else:
A__ = dataset
A__ = pa.Table.from_pydict(UpperCAmelCase__)
yield file_idx, self._cast_table(UpperCAmelCase__)
# If the file has one json object per line
else:
with open(UpperCAmelCase__ , '''rb''') as f:
A__ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ = max(self.config.chunksize // 32 , 16 << 10)
A__ = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A__ = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase__)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''')
try:
while True:
try:
A__ = paj.read_json(
io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase__ , pa.ArrowInvalid)
and "straddling" not in str(UpperCAmelCase__)
or block_size > len(UpperCAmelCase__)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON
try:
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
A__ = pa.Table.from_pydict(UpperCAmelCase__)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
yield file_idx, self._cast_table(UpperCAmelCase__)
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__)
batch_idx += 1
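
# Hedged usage sketch of the public API this builder backs (file names are
# placeholders; `field` selects the key holding the records in a single JSON
# document, mirroring `self.config.field` above).
# from datasets import load_dataset
#
# # One JSON object per line (JSON Lines):
# ds = load_dataset("json", data_files="data.jsonl", split="train")
#
# # A single document such as {"version": "0.1", "data": [{...}, ...]}:
# ds = load_dataset("json", data_files="data.json", field="data", split="train")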
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCAmelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Dict:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self ) -> str:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
__lowerCAmelCase = DistilBertModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = DistilBertForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
__lowerCAmelCase = DistilBertForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DistilBertForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DistilBertForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = DistilBertForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_snake_case = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = True
_snake_case = True
_snake_case = True
def A__ ( self ) -> List[str]:
__lowerCAmelCase = DistilBertModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , dim=37 )
def A__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase__ )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase__ )
def A__ ( self ) -> Any:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase__ )
def A__ ( self ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase__ )
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase__ )
def A__ ( self ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase__ )
@slow
def A__ ( self ) -> Tuple:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DistilBertModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@slow
@require_torch_gpu
def A__ ( self ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__lowerCAmelCase = True
__lowerCAmelCase = model_class(config=UpperCAmelCase__ )
__lowerCAmelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = torch.jit.trace(
UpperCAmelCase__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , """traced_model.pt""" ) )
__lowerCAmelCase = torch.jit.load(os.path.join(UpperCAmelCase__ , """traced_model.pt""" ) , map_location=UpperCAmelCase__ )
loaded(inputs_dict["""input_ids"""].to(UpperCAmelCase__ ) , inputs_dict["""attention_mask"""].to(UpperCAmelCase__ ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ) -> int:
__lowerCAmelCase = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
__lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
__lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__lowerCAmelCase = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1e-4 ) )
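
# Hedged standalone sketch of the integration check above, using the public
# checkpoint name; exact hidden-state values can drift across library versions.
# import torch
# from transformers import DistilBertModel, DistilBertTokenizer
#
# tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
# model = DistilBertModel.from_pretrained("distilbert-base-uncased")
# inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
# with torch.no_grad():
#     last_hidden = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)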
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
return AutoConfig.from_pretrained(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase__):
create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
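
# Hedged illustration of the layer-picking idea exercised above: a student
# keeps evenly spaced teacher layers. `pick_layers` is a made-up name for this
# sketch, not the helper's real implementation.
# def pick_layers(n_student: int, n_teacher: int) -> list:
#     step = n_teacher / n_student
#     return [round(i * step) for i in range(n_student)]
#
# pick_layers(3, 12)  # -> [0, 4, 8]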
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
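
# Hedged sketch of the public entry point these internals back; requires a
# local Spark session and a `datasets` release that ships `Dataset.from_spark`.
# import pyspark
# from datasets import Dataset
#
# spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
# df = spark.range(100)
# ds = Dataset.from_spark(df)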
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str:
'''simple docstring'''
A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]:
'''simple docstring'''
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size['''shortest_edge'''] * h / w)
A__ = self.size['''shortest_edge''']
elif w > h:
A__ = self.size['''shortest_edge''']
A__ = int(self.size['''shortest_edge'''] * w / h)
else:
A__ = self.size['''shortest_edge''']
A__ = self.size['''shortest_edge''']
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0]
A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = DeformableDetrImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''size'''))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''image_id''': 39_769, '''annotations''': target}
# encode them
A__ = DeformableDetrImageProcessor()
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
A__ = DeformableDetrImageProcessor(format='''coco_panoptic''')
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify masks
A__ = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__)
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
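
# Hedged preprocessing sketch mirroring the integration tests above; the image
# path assumes the same local test fixtures used by the tests.
# from PIL import Image
# from transformers import DeformableDetrImageProcessor
#
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# processor = DeformableDetrImageProcessor()
# encoding = processor(images=image, return_tensors="pt")
# encoding["pixel_values"].shape  # e.g. torch.Size([1, 3, 800, 1066])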
"""simple docstring"""
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
lowercase = name
lowercase = val
def __str__( self ):
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self , _lowerCamelCase ):
return self.val < other.val
class a :
def __init__( self , _lowerCamelCase ):
lowercase = {}
lowercase = {}
lowercase = self.build_heap(UpperCAmelCase__ )
def __getitem__( self , _lowerCamelCase ):
return self.get_value(UpperCAmelCase__ )
def UpperCamelCase_ ( self , _lowerCamelCase ):
return (idx - 1) // 2
def UpperCamelCase_ ( self , _lowerCamelCase ):
return idx * 2 + 1
def UpperCamelCase_ ( self , _lowerCamelCase ):
return idx * 2 + 2
def UpperCamelCase_ ( self , _lowerCamelCase ):
return self.heap_dict[key]
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = len(UpperCAmelCase__ ) - 1
lowercase = self.get_parent_idx(UpperCAmelCase__ )
for idx, i in enumerate(UpperCAmelCase__ ):
lowercase = idx
lowercase = i.val
for i in range(UpperCAmelCase__ , -1 , -1 ):
self.sift_down(UpperCAmelCase__ , UpperCAmelCase__ )
return array
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
while True:
lowercase = self.get_left_child_idx(UpperCAmelCase__ ) # noqa: E741
lowercase = self.get_right_child_idx(UpperCAmelCase__ )
lowercase = idx
if l < len(UpperCAmelCase__ ) and array[l] < array[idx]:
lowercase = l
if r < len(UpperCAmelCase__ ) and array[r] < array[smallest]:
lowercase = r
if smallest != idx:
lowercase , lowercase = array[smallest], array[idx]
(
(
lowercase
) , (
lowercase
) ,
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
lowercase = smallest
else:
break
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = self.get_parent_idx(UpperCAmelCase__ )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowercase , lowercase = self.heap[idx], self.heap[p]
lowercase , lowercase = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowercase = p
lowercase = self.get_parent_idx(UpperCAmelCase__ )
def UpperCamelCase_ ( self ):
return self.heap[0]
def UpperCamelCase_ ( self ):
lowercase , lowercase = self.heap[-1], self.heap[0]
lowercase , lowercase = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowercase = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def UpperCamelCase_ ( self , _lowerCamelCase ):
self.heap.append(UpperCAmelCase__ )
lowercase = len(self.heap ) - 1
lowercase = node.val
self.sift_up(len(self.heap ) - 1 )
def UpperCamelCase_ ( self ):
return len(self.heap ) == 0
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowercase = new_value
lowercase = new_value
self.sift_up(self.idx_of_element[node] )
_UpperCamelCase : Optional[Any] = Node('R', -1)
_UpperCamelCase : int = Node('B', 6)
_UpperCamelCase : Dict = Node('A', 3)
_UpperCamelCase : Union[str, Any] = Node('X', 1)
_UpperCamelCase : Any = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_UpperCamelCase : Dict = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
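
# Extra usage sketch: repeatedly removing the root drains the heap in
# ascending `val` order (uses only the methods defined above).
while not my_min_heap.is_empty():
    print(my_min_heap.remove())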
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two vectors using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two vectors without numpy."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark both implementations with timeit."""
        from timeit import timeit

        print('Without Numpy')
        print(
            timeit(
                'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])', number=10_000, globals=globals(), ) )
        print('With Numpy')
        print(
            timeit(
                'euclidean_distance([1, 2, 3], [4, 5, 6])', number=10_000, globals=globals(), ) )
benchmark()
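    # Hedged quick check (hand-verifiable 3-4-5 triangle):
    #   euclidean_distance([0, 0], [3, 4])        -> 5.0
    #   euclidean_distance_no_np([0, 0], [3, 4])  -> 5.0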
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
A__ : List[Any] = """sshleifer/bart-tiny-random"""
A__ : List[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return AutoConfig.from_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ , *lowerCamelCase_ : int =create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ , *lowerCamelCase_ : List[Any] =create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ , *lowerCamelCase_ : Union[str, Any] =create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ , *lowerCamelCase_ : Optional[int] =create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def UpperCAmelCase__ ( self : str ):
with self.assertRaises(UpperCAmelCase__ ):
create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__ )
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''SpeechT5FeatureExtractor'''
UpperCAmelCase__ = '''SpeechT5Tokenizer'''
def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase__ , UpperCAmelCase__)
def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]:
'''simple docstring'''
A__ = kwargs.pop('''audio''' , UpperCAmelCase__)
A__ = kwargs.pop('''text''' , UpperCAmelCase__)
A__ = kwargs.pop('''text_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
elif text is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if audio_target is not None:
A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_values''']
elif text_target is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]:
'''simple docstring'''
A__ = kwargs.pop('''input_values''' , UpperCAmelCase__)
A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__)
A__ = kwargs.pop('''labels''' , UpperCAmelCase__)
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
if input_values is not None:
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
elif input_ids is not None:
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]):
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = self.feature_extractor.feature_size
A__ = self.feature_extractor.num_mel_bins
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
A__ = feature_size_hack
A__ = targets['''input_values''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
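
# Hedged usage sketch via the public API (the processor class defined above is
# exposed as `SpeechT5Processor`; the checkpoint name is the public TTS one).
# from transformers import SpeechT5Processor
#
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# inputs = processor(text="Hello world", return_tensors="pt")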
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Combines the image and text embeddings into a format usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
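
# Hedged smoke test for the projection module above; the dimensions are
# illustrative choices, not taken from any released checkpoint.
# proj = UnCLIPTextProjModel(
#     clip_extra_context_tokens=4,
#     clip_embeddings_dim=768,
#     time_embed_dim=1536,
#     cross_attention_dim=2048,
# )
# image_embeddings = torch.randn(2, 768)
# prompt_embeds = torch.randn(2, 768)
# text_hidden_states = torch.randn(2, 77, 768)
# hs, t_emb = proj(
#     image_embeddings=image_embeddings,
#     prompt_embeds=prompt_embeds,
#     text_encoder_hidden_states=text_hidden_states,
#     do_classifier_free_guidance=False,
# )
# hs.shape, t_emb.shape  # (2, 4 + 77, 2048), (2, 1536)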
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git_vision_model'''
def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''') == "git":
A__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git'''
def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any:
'''simple docstring'''
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__)
if vision_config is None:
A__ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
A__ = GitVisionConfig(**UpperCAmelCase__)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = tie_word_embeddings
A__ = num_image_with_embedding
A__ = bos_token_id
A__ = eos_token_id
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
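
# Hedged usage sketch via the public API (the classes above correspond to
# `GitVisionConfig` / `GitConfig` in transformers).
# from transformers import GitConfig, GitVisionConfig
#
# vision_config = GitVisionConfig()
# config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
# config.vision_config.hidden_size  # 768 by default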
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    """simple docstring"""
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f'Unable to determine file format from file extension {path}. '
        f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )


def run_command_factory(args):
    """simple docstring"""
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat) -> Union[str, Any]:
        '''simple docstring'''
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> Any:
        '''simple docstring'''
        run_parser = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
        run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
        run_parser.add_argument("""--input""" , type=str , help="""Path to the file to use for inference""" )
        run_parser.add_argument("""--output""" , type=str , help="""Path to the file that will be used post to write results.""" )
        run_parser.add_argument("""--model""" , type=str , help="""Name or path to the model to instantiate.""" )
        run_parser.add_argument("""--config""" , type=str , help="""Name or path to the model\'s config to instantiate.""" )
        run_parser.add_argument(
            """--tokenizer""" , type=str , help="""Name of the tokenizer to use. (default: same as the model name)""" )
        run_parser.add_argument(
            """--column""" , type=str , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
        run_parser.add_argument(
            """--format""" , type=str , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
        run_parser.add_argument(
            """--device""" , type=int , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
        run_parser.set_defaults(func=run_command_factory )

    def run(self) -> int:
        '''simple docstring'''
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
        else:
            self._reader.save(outputs )
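
# Hedged CLI sketch built from the flags registered above (file names are
# placeholders; `--format` must be one of PipelineDataFormat.SUPPORTED_FORMATS):
#   transformers-cli run --task text-classification \
#       --input data.csv --output out.csv --format csv --column text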
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown by Google Scholar for the given lookup."""
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class __lowerCamelCase ( UpperCAmelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'blip_2_vision_model'
def __init__( self , __UpperCAmelCase=1408 , __UpperCAmelCase=6144 , __UpperCAmelCase=39 , __UpperCAmelCase=16 , __UpperCAmelCase=224 , __UpperCAmelCase=14 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.00001 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-1_0 , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> Tuple:
super().__init__(**UpperCAmelCase__ )
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = patch_size
_a = image_size
_a = initializer_range
_a = attention_dropout
_a = layer_norm_eps
_a = hidden_act
_a = qkv_bias
@classmethod
def _UpperCAmelCase ( cls , __UpperCAmelCase , **__UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCAmelCase__ )
_a , _a = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
_a = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class __lowerCamelCase ( UpperCAmelCase__ ):
'''simple docstring'''
A_ : int = 'blip_2_qformer'
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-1_2 , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=2 , __UpperCAmelCase=1408 , **__UpperCAmelCase , ) -> Union[str, Any]:
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = cross_attention_frequency
_a = encoder_hidden_size
@classmethod
def _UpperCAmelCase ( cls , __UpperCAmelCase , **__UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCAmelCase__ )
_a , _a = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
_a = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class __lowerCamelCase ( UpperCAmelCase__ ):
'''simple docstring'''
A_ : int = 'blip-2'
A_ : int = True
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=32 , **__UpperCAmelCase ) -> List[str]:
super().__init__(**UpperCAmelCase__ )
if vision_config is None:
_a = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
_a = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
_a = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
_a = BlipaVisionConfig(**UpperCAmelCase__ )
_a = BlipaQFormerConfig(**UpperCAmelCase__ )
_a = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
_a = CONFIG_MAPPING[text_model_type](**UpperCAmelCase__ )
_a = self.text_config.tie_word_embeddings
_a = self.text_config.is_encoder_decoder
_a = num_query_tokens
_a = self.vision_config.hidden_size
_a = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_a = 1.0
_a = 0.02
@classmethod
def _UpperCAmelCase ( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCAmelCase__ , )
def _UpperCAmelCase ( self ) -> Any:
_a = copy.deepcopy(self.__dict__ )
_a = self.vision_config.to_dict()
_a = self.qformer_config.to_dict()
_a = self.text_config.to_dict()
_a = self.__class__.model_type
return output | 320 |
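if __name__ == "__main__":
    # A minimal sketch (not part of the original module): compose the three
    # sub-configs defined above and round-trip the result. "opt" is the default
    # text backbone picked in Blip2Config.__init__ when no model_type is given.
    vision = Blip2VisionConfig()
    qformer = Blip2QFormerConfig()
    text = CONFIG_MAPPING["opt"]()
    composite = Blip2Config.from_vision_qformer_text_configs(vision, qformer, text)
    assert composite.to_dict()["qformer_config"]["encoder_hidden_size"] == 1408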
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
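# Hedged usage sketch (the script file name and model paths are placeholders,
# not from the original file):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged-pipeline \
#       --alpha 0.75 --device cpu
#
# The merge applied above is W <- W + alpha * (up @ down) per targeted layer,
# so --alpha controls how strongly the LoRA delta is baked into the base weights.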
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
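# A minimal instantiation sketch (assumption: randomly initialised weights; the
# DeeBERT example scripts normally load a fine-tuned checkpoint instead):
#
#   config = RobertaConfig.from_pretrained("roberta-base", num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   model.eval()  # in eval mode the forward pass also returns per-exit entropies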
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCamelCase : Any = """
import os
"""
_lowerCamelCase : Optional[int] = """
def foo():
import os
return False
"""
_lowerCamelCase : List[Any] = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
_lowerCamelCase : List[Any] = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
_lowerCamelCase : Union[str, Any] = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
_lowerCamelCase : List[Any] = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
_lowerCamelCase : List[Any] = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
_lowerCamelCase : str = """
import os
try:
import bar
except:
raise ValueError()
"""
_lowerCamelCase : Optional[Any] = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
_lowerCamelCase : Any = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNet2DConditionModel(**model_kwargs)
return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
"num_train_timesteps": 10_00,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
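# Design note for the tests above: in img2img, `strength` picks how far along
# the noise schedule the init image is pushed before denoising starts, so 0.2
# (fast test) stays close to the input while 0.5 (slow test) allows larger
# edits guided by the depth hint.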
def nor_gate(input_1: int, input_2: int) -> int:
    """Return the output of a two-input NOR gate: 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 0)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |    {nor_gate(0, 0)}   |")
    print(f"|    0    |    1    |    {nor_gate(0, 1)}   |")
    print(f"|    1    |    0    |    {nor_gate(1, 0)}   |")
    print(f"|    1    |    1    |    {nor_gate(1, 1)}   |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
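# NOR is functionally complete; a short sketch (not in the original script)
# building NOT, OR and AND from nor_gate alone via the standard identities.
def not_gate(a: int) -> int:
    return nor_gate(a, a)


def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))


def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))


assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]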
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict=1_3 , __UpperCAmelCase : List[str]=7 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Any=False , __UpperCAmelCase : Optional[int]=9_9 , __UpperCAmelCase : Dict=1_6 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Tuple=4 , __UpperCAmelCase : Tuple=4 , __UpperCAmelCase : int="gelu" , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Dict=2_0 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : str=0 , __UpperCAmelCase : int=1_6 , __UpperCAmelCase : List[Any]=1_6 , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = eos_token_id
SCREAMING_SNAKE_CASE__ = pad_token_id
SCREAMING_SNAKE_CASE__ = bos_token_id
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = word_embed_proj_dim
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=UpperCAmelCase__ , **self.config_updates , )
SCREAMING_SNAKE_CASE__ = prepare_opt_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = TFOPTModel(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = inputs_dict["""input_ids"""]
SCREAMING_SNAKE_CASE__ = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ = inputs_dict["""attention_mask"""][:1, :]
SCREAMING_SNAKE_CASE__ = 1
# first forward pass
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ):
if hasattr(UpperCAmelCase__ , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(UpperCAmelCase__ , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
SCREAMING_SNAKE_CASE__ = model_class(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = _get_word_embedding_weight(UpperCAmelCase__ , model.get_input_embeddings() )
SCREAMING_SNAKE_CASE__ = _get_word_embedding_weight(UpperCAmelCase__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = _get_word_embedding_weight(UpperCAmelCase__ , model.get_input_embeddings() )
SCREAMING_SNAKE_CASE__ = _get_word_embedding_weight(UpperCAmelCase__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
SCREAMING_SNAKE_CASE__ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , UpperCAmelCase__ )
# check that weights remain the same after resizing
SCREAMING_SNAKE_CASE__ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
SCREAMING_SNAKE_CASE__ = False
self.assertTrue(UpperCAmelCase__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
SCREAMING_SNAKE_CASE__ = False
self.assertTrue(UpperCAmelCase__ )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCamelCase (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
SCREAMING_SNAKE_CASE__ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
SCREAMING_SNAKE_CASE__ = tf.not_equal(UpperCAmelCase__ , model.config.pad_token_id )
with tf.GradientTape():
SCREAMING_SNAKE_CASE__ = model(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).last_hidden_state
SCREAMING_SNAKE_CASE__ = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=4e-3 ) )
SCREAMING_SNAKE_CASE__ = tf.function(UpperCAmelCase__ , jit_compile=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = xla_generate(UpperCAmelCase__ , UpperCAmelCase__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=4e-2 ) )
@require_tf
@slow
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
super().setUp()
SCREAMING_SNAKE_CASE__ = """facebook/opt-350m"""
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = TFOPTForCausalLM.from_pretrained(self.path_model )
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(self.path_model )
SCREAMING_SNAKE_CASE__ = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
SCREAMING_SNAKE_CASE__ = tokenizer(UpperCAmelCase__ , return_tensors="""tf""" , padding=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
SCREAMING_SNAKE_CASE__ = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE__ = tf.function(UpperCAmelCase__ , jit_compile=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-4 ) )
@require_tf
@slow
class lowerCamelCase (unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = """facebook/opt-125m"""
SCREAMING_SNAKE_CASE__ = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = TFOPTForCausalLM.from_pretrained(UpperCAmelCase__ )
for prompt in self.prompts:
SCREAMING_SNAKE_CASE__ = tokenizer(UpperCAmelCase__ , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE__ = model.generate(UpperCAmelCase__ , max_length=1_0 )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = """facebook/opt-350m"""
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = TFOPTForCausalLM.from_pretrained(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = """left"""
# use different length sentences to test batching
SCREAMING_SNAKE_CASE__ = [
"""Hello, my dog is a little""",
"""Today, I""",
]
SCREAMING_SNAKE_CASE__ = tokenizer(UpperCAmelCase__ , return_tensors="""tf""" , padding=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = inputs["""input_ids"""]
SCREAMING_SNAKE_CASE__ = model.generate(input_ids=UpperCAmelCase__ , attention_mask=inputs["""attention_mask"""] )
SCREAMING_SNAKE_CASE__ = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE__ = model.generate(input_ids=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
SCREAMING_SNAKE_CASE__ = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE__ = model.generate(input_ids=UpperCAmelCase__ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = [
"""Hello, my dog is a little bit of a dork.\nI\'m a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , [non_padded_sentence, padded_sentence] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = """facebook/opt-350m"""
SCREAMING_SNAKE_CASE__ = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = TFOPTForCausalLM.from_pretrained(UpperCAmelCase__ )
for prompt in self.prompts:
SCREAMING_SNAKE_CASE__ = tokenizer(UpperCAmelCase__ , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE__ = model.generate(UpperCAmelCase__ , max_length=1_0 )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
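# Design note on the batched-generation test above: OPT is decoder-only, so the
# tokenizer's padding_side is set to "left". Generation continues from the final
# position of each row; with right padding that position would hold pad tokens
# and the shorter prompt's continuation would be corrupted. A minimal sketch
# (same checkpoint as the test, network access assumed):
#
#   tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
#   tokenizer.padding_side = "left"
#   batch = tokenizer(["Hello, my dog is a little", "Today, I"],
#                     return_tensors="tf", padding=True)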
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single image, a single video, or a batch of videos into a batch of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
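# A minimal usage sketch for the processor above (the class name is our
# reconstruction; the original identifiers were mangled). One 8-frame dummy
# video goes in, a batched pixel-value tensor comes out.
#
#   import numpy as np
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor(video, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)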
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = encoder_stride
A__ = num_attention_outputs
A__ = embed_dim
A__ = embed_dim + 1
A__ = resolution
A__ = depths
A__ = hidden_sizes
A__ = dim
A__ = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict:
'''simple docstring'''
A__ = TFEfficientFormerModel(config=UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
A__ = 1
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
'''simple docstring'''
A__ = TFEfficientFormerModelTester(self)
A__ = ConfigTester(
self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict):
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
if hasattr(self.model_tester , '''encoder_seq_length'''):
A__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
A__ = seq_length * self.model_tester.chunk_length
else:
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
A__ = outputs.decoder_hidden_states
                self.assertIsInstance(UpperCAmelCase__ , (list, tuple))
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
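
    # the teacher-distilled classification model takes no labels at call time,
    # so the override below strips them from the common-test inputs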
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
    def test_for_masked_image_modeling(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , '''seq_length''' , None)
        encoder_seq_length = getattr(self.model_tester , '''encoder_seq_length''' , seq_len)
        encoder_key_length = getattr(self.model_tester , '''key_length''' , encoder_seq_length)
        chunk_length = getattr(self.model_tester , '''chunk_length''' , None)
        # the chunk_length/num_hashes handling comes from the shared attention-test
        # template (used by LSH models such as Reformer) and is a no-op here
        if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class) , training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions) , self.model_tester.num_attention_outputs)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class) , training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions) , self.model_tester.num_attention_outputs)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def test_compile_tf_model(self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img() -> Any:
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self ):
        '''simple docstring'''
        return (
            EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self ):
        '''simple docstring'''
        model = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs , training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@slow
    def test_inference_image_classification_head_with_teacher(self ):
        '''simple docstring'''
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            '''snap-research/efficientformer-l1-300''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs , training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
| 14 | 0 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path , pytorch_dump_folder_path ):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
__lowerCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
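
    # walk every key the new model reported as missing and copy the matching
    # parameter over from the old checkpoint, translating attribute names via
    # the mapping above; fused in_proj weights are split into q/k/v projections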
    for key in loading_info["missing_keys"]:
        attributes = key.split(""".""" )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"""{attribute} is initialized.""" )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"""{attribute} is initialized""" )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , """in_proj_weight""" ):
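                # old checkpoints fuse query/key/value into one in_proj matrix;
                # each projection occupies embed_dim consecutive rows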
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""" )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
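
# Example invocation (hypothetical paths):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old \
#       --pytorch_dump_folder_path ./prophetnet_converted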
| 301 |
from __future__ import annotations
def fractional_knapsack(value , weight , capacity ) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    # take items greedily in decreasing value-per-weight order
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value = 0
    fractions = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            # the whole item fits: take it entirely
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # take the largest fraction that still fits, then stop
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
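
# Worked example (hypothetical numbers): values [60, 100, 120], weights
# [10, 20, 30] and capacity 50 give max_value 240.0 with fractions
# [1, 1, 2/3]: items 0 and 1 are taken whole, item 2 only fractionally.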
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"""comet"""}
_has_fairseq = importlib.util.find_spec('''fairseq''') is not None
UNSUPPORTED_ON_WINDOWS = {"""code_eval"""}
_on_windows = os.name == """nt"""
REQUIRE_TRANSFORMERS = {"""bertscore""", """frugalscore""", """perplexity"""}
_has_transformers = importlib.util.find_spec('''transformers''') is not None
def skip_if_metric_requires_fairseq(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("""\"test requires Fairseq\"""" )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_if_metric_requires_transformers(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("""\"test requires transformers\"""" )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_on_windows(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("""\"test not supported on Windows\"""" )
        else:
            test_case(self , metric_name )
    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest(parameterized.TestCase ):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
    def test_load_metric(self , metric_name ):
        """simple docstring"""
        doctest.ELLIPSIS_MARKER = """[...]"""
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , metric_name ) ).module_path )
        metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False )
        # check parameters
        parameters = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
    def test_load_real_metric(self , metric_name ):
        """simple docstring"""
        doctest.ELLIPSIS_MARKER = """[...]"""
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , metric_name ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
    def patch_intensive_calls(self , metric_name , module_name ):
        """simple docstring"""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name ):
yield
else:
yield
@contextmanager
    def use_local_metrics(self ):
        """simple docstring"""
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join("""metrics""" , metric_name ) , *args , **kwargs )
        with patch("""datasets.load_metric""" ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
yield
@classmethod
    def register_intensive_calls_patcher(cls , metric_name ):
        """simple docstring"""
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
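
# Each patcher below replaces a metric's expensive calls (model downloads,
# forward passes) with cheap mocks so its doctests run quickly and offline.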
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def patch_bleurt(module_name ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
    class MockedPredictor(Predictor ):
        def predict(self , input_dict ):
            """simple docstring"""
            assert len(input_dict["""input_ids"""] ) == 2
            return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def patch_bertscore(module_name ):
import torch
    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def patch_comet(module_name ):
    def load_from_checkpoint(model_path ):
        class Model:
            def predict(self , data , *args , **kwargs ):
                """simple docstring"""
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )
        return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("""comet.download_model""" ) as mock_download_model:
        mock_download_model.return_value = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
    wrong_scheme = """ERROR"""
    error_message = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
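
# For reference, a valid call would look like (hypothetical labels):
#   metric.compute(predictions=[["B-PER", "O"]], references=[["B-PER", "O"]], scheme="IOB2", mode="strict")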
| 122 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset , args: Dict[str, str] ):
    """simple docstring"""
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('''wer''' )
    cer = load_metric('''cer''' )
    # compute metrics
    wer_result = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    cer_result = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str )
    with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""
        with open(pred_file , '''w''' ) as p, open(target_file , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"""{i}""" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"""{i}""" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(write_to_file , with_indices=True )
def normalize_text(text ) -> str:
    """simple docstring"""
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def main(args ):
    """simple docstring"""
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('''audio''' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['''prediction'''] = prediction['''text''']
        batch['''target'''] = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
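
# Example invocation (hypothetical model and dataset ids):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs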
| 14 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        # make sure every sequence ends with an explicit EOS token, which the
        # MBart input preparation below expects
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict ):
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
'''simple docstring'''
if attention_mask is None:
lowercase = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
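
# Note: when no masks are supplied, the helper above derives the attention mask
# from the pad token id and defaults every head mask to all-ones (no pruning).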
@require_tf
class TFMBartModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self ):
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase ):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
@cached_property
    def tokenizer(self ):
        return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
    def model(self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected(self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text(self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
@slow
    def test_batch_generation_en_ro(self ):
        self._assert_generated_batch_equal_expected()
| 220 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : int = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
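
# `_LazyModule` at the bottom of this file materializes the import structure
# above on first attribute access, so optional backends load only when used.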
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
A__ : List[str] = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
A__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |