"""LeViT model configuration"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
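# A hedged usage sketch (not part of the original module): constructing the
# config with its defaults and inspecting the ONNX input specification.
#
#     config = LevitConfig()                 # defaults: image_size=224, three stages
#     onnx_config = LevitOnnxConfig(config)
#     print(onnx_config.inputs)              # OrderedDict([("pixel_values", {0: "batch", ...})])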
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
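# Hedged note: with the lazy structure above, importing the package does not
# pull in torch; the submodule loads on first attribute access, roughly:
#
#     from transformers.models.trajectory_transformer import TrajectoryTransformerConfig
#     config = TrajectoryTransformerConfig()  # resolved via _LazyModule.__getattr__
#
# (the exact package path varies across transformers versions).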
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build the named-parameter dicts consumed by the parameterized test class below."""
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
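# The end-user equivalent of the streaming path exercised above (illustrative
# sketch; requires network access):
#
#     from datasets import load_dataset
#     ds = load_dataset("wikipedia", "20220301.frr", streaming=True)
#     print(next(iter(ds["train"])))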
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear module with a LoRA-style low-rank adapter (test helper)."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
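# Hedged usage sketch (names below are illustrative only): wrapping a single
# linear layer; `rank` controls the adapter bottleneck width.
#
#     base = nn.Linear(768, 768)
#     wrapped = LoRALayer(base, rank=16)
#     y = wrapped(torch.randn(1, 768))  # base output + zero-initialized adapter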
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto")
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()
        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_quantization_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")
        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")
        # Check this does not throw an error
        _ = self.model_fp16.half()
        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
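# Hedged sketch of the loading pattern exercised above (requires CUDA and
# bitsandbytes; the model name matches the constant in Base4bitTest):
#
#     quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#     model = AutoModelForCausalLM.from_pretrained(
#         "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
#     )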
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None
        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto")
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)
        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
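# Hedged usage sketch mirroring what the tests above verify (assumes Pillow and
# torch are installed):
#
#     processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
#     pixel_values = processor(Image.new("RGB", (32, 32)), return_tensors="pt").pixel_values
#     assert pixel_values.shape == (1, 3, 18, 18)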
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
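# Hedged behavioral note: constructing the deprecated class emits a
# FutureWarning but is otherwise a drop-in YolosImageProcessor, e.g.:
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         YolosFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)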
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
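# Worked example (illustrative): for the symbol tuple of "low" with the
# end-of-word marker appended,
#
#     get_pairs(("l", "o", "w</w>"))  ->  {("l", "o"), ("o", "w</w>")}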
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer using Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
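    # Illustrative layout (ids per the constants in __init__ above: cls/bos=0,
    # pad=1, sep/eos=2):
    #   single sequence:  <s> A </s>             ->  [0] + A + [2]
    #   sequence pair:    <s> A </s></s> B </s>  ->  [0] + A + [2, 2] + B + [2]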
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
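# Hedged usage sketch (requires network access; PhoBERT expects word-segmented
# Vietnamese input, typically produced by an external segmenter):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")
#     ids = tokenizer("Tôi là sinh_viên").input_ids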
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True,
        scope=None, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president ... (repeated ten times)
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV2ModelTester:
    def __init__(
        self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25,
        depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32,
        first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6",
        last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True,
        use_labels=True, num_labels=10, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ) -> str:
'''simple docstring'''
A__ : Dict =MobileNetVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] =model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
A__ : Dict =self.num_labels
A__ : List[Any] =MobileNetVaForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
A__ : Any =self.num_labels
A__ : int =MobileNetVaForSemanticSegmentation(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
A__ : Optional[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ : List[Any] =config_and_inputs
A__ : Any ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__snake_case = (
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : int =MobileNetVaModelTester(self )
A__ : List[Any] =MobileNetVaConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
pass
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
A__ , A__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : str =model_class(lowerCAmelCase_ )
A__ : Optional[int] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] =[*signature.parameters.keys()]
A__ : str =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ):
A__ : Dict =model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
A__ : List[Any] =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
A__ : Optional[int] =outputs.hidden_states
A__ : Tuple =16
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Tuple =True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Dict =True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase_ )
@slow
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Any =MobileNetVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
A__ : Tuple =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(lowerCAmelCase_ )
A__ : str =self.default_image_processor
A__ : Optional[Any] =prepare_img()
A__ : str =image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
A__ : Tuple =model(**lowerCAmelCase_ )
# verify the logits
A__ : Optional[Any] =torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
A__ : List[str] =torch.tensor([0.2445, -1.1993, 0.1905] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[int] =MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
A__ : Union[str, Any] =model.to(lowerCAmelCase_ )
A__ : Optional[int] =MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
A__ : List[str] =prepare_img()
A__ : List[str] =image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
A__ : Optional[int] =model(**lowerCAmelCase_ )
A__ : int =outputs.logits
# verify the logits
A__ : Tuple =torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowerCAmelCase_ )
A__ : str =torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=lowerCAmelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 687 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCamelCase ( __snake_case : int ) -> Optional[int]:
"""simple docstring"""
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Optional[Any] =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : List[str] =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A__ : int =True
if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None:
A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Union[str, Any] =kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Optional[Any] =kwargs["""min_value"""]
A__ : Any =list(lowerCAmelCase_ )
A__ : int =[p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs["""device"""] )
A__ : Optional[int] =None
A__ : Any =decay
A__ : List[Any] =min_decay
A__ : Optional[int] =update_after_step
A__ : List[str] =use_ema_warmup
A__ : str =inv_gamma
A__ : Union[str, Any] =power
A__ : str =0
A__ : str =None # set in `step()`
A__ : List[str] =model_cls
A__ : Optional[int] =model_config
@classmethod
def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel":
'''simple docstring'''
A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase_ )
return ema_model
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
A__ : Optional[int] =self.model_cls.from_config(self.model_config )
A__ : Optional[Any] =self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power
else:
A__ : Union[str, Any] =(1 + step) / (10 + step)
A__ : str =min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
A__ : int =max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
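    # Illustrative schedule values (hypothetical steps, computed from the formulas
    # above): with use_ema_warmup and inv_gamma=1.0, power=2/3, the decay ramps from
    # ~0.80 at step 10 to ~0.99 at step 1000; without warmup, (1 + step) / (10 + step)
    # gives 0.55 at step 10 and ~0.991 at step 1000. Either value is then clamped
    # into [min_decay, decay].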
@torch.no_grad()
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Any =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : Optional[int] =parameters.parameters()
A__ : Dict =list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ : Any =self.get_decay(self.optimization_step )
A__ : Optional[int] =decay
A__ : List[str] =1 - decay
A__ : str =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
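                    # in-place EMA update: s <- s - (1 - decay) * (s - p), which is
                    # algebraically the same as s <- decay * s + (1 - decay) * p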
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None:
'''simple docstring'''
A__ : str =[
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def lowercase__ ( self : Optional[Any] ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : List[str] =[param.detach().cpu().clone() for param in parameters]
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ : List[str] =None
def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None:
'''simple docstring'''
A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ )
A__ : List[Any] =state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError("""Invalid min_decay""" )
A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError("""Invalid optimization_step""" )
A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError("""Invalid update_after_step""" )
A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
A__ : Tuple =state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ )
if shadow_params is not None:
A__ : List[str] =shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 687 | 1 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__snake_case : int = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
__snake_case : Any = F"""https://www.google.com/search?q={query}&num=100"""
__snake_case : Optional[Any] = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
__snake_case : Tuple = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
__snake_case : List[str] = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 687 |
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case : Union[str, Any] = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict:
"""simple docstring"""
A__ : Union[str, Any] =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}"
raise ValueError(__snake_case )
A__ : Tuple =requests.get(
f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
if response.status_code == 429:
raise requests.HTTPError
A__ : Tuple =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
A__ : Tuple ={}
for id_ in range(__snake_case ):
A__ : List[Any] ={
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 687 | 1 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
__snake_case : List[Any] = TypeVar('T')
class lowerCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase_ : bool = True ) -> None:
'''simple docstring'''
A__ : dict[T, list[T]] ={} # dictionary of lists
A__ : Optional[int] =directed
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : T , lowerCAmelCase_ : T ) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
self.adj_list[destination_vertex].append(lowerCAmelCase_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
A__ : str =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCAmelCase_ )
A__ : Union[str, Any] =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
A__ : Any =[destination_vertex]
A__ : List[Any] =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
A__ : str =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
A__ : Optional[int] =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
A__ : List[str] =[destination_vertex]
A__ : Union[str, Any] =[]
return self
def __repr__( self : Tuple ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
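# A minimal usage sketch (hedged: the obfuscated `lowerCamelCase`/`lowercase__` names
# above stand in for the original `GraphAdjacencyList`/`add_edge`, and the vertices
# below are made up for illustration):
#
#     graph = GraphAdjacencyList[int](directed=False)
#     graph.add_edge(1, 2).add_edge(2, 3)   # add_edge returns self, so calls chain
#     print(graph)                          # {1: [2], 2: [1, 3], 3: [2]}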
| 687 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
A__ : Optional[Any] =parser.parse_args()
return args
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
if args.tpu_name:
A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__snake_case )
tf.tpu.experimental.initialize_tpu_system(__snake_case )
return tpu
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Any =0
for file in file_list:
A__ : Optional[int] =file.split("""/""" )[-1]
A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 )
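        # e.g. a shard named "dataset-00003-4096.tfrecord" (hypothetical name) matches
        # the pattern above and yields sample_count == "4096"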
A__ : str =int(__snake_case )
num_samples += sample_count
return num_samples
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] =count_samples(__snake_case )
A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case )
if shuffle:
A__ : Optional[int] =dataset.shuffle(len(__snake_case ) )
A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) )
A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case )
if shuffle:
assert shuffle_buffer_size is not None
A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size )
A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case )
A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case )
A__ : Tuple =dataset.prefetch(__snake_case )
return dataset
def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not args.no_tpu:
A__ : Dict =initialize_tpu(__snake_case )
A__ : int =tf.distribute.TPUStrategy(__snake_case )
else:
A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer )
A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
A__ : Optional[Any] =count_samples(__snake_case )
A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A__ : str =steps_per_epoch * args.num_epochs
with strategy.scope():
A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A__ , A__ : Optional[Any] =create_optimizer(
num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__snake_case, metrics=["""accuracy"""] )
def decode_fn(__snake_case : Tuple ):
A__ : Dict ={
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__snake_case, __snake_case )
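    # Illustrative: each parsed example is a dict of two integer tensors of shape
    # (max_length,), e.g. {"input_ids": <512 token ids>, "attention_mask": <512 zeros/ones>}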
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A__ : List[Any] =DataCollatorForLanguageModeling(
tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" )
def mask_with_collator(__snake_case : Optional[int] ):
# TF really needs an isin() function
A__ : Union[str, Any] =(
~tf.cast(batch["""attention_mask"""], tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
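        # Illustrative: for a row [CLS] w1 w2 [SEP] [PAD] the expression above yields
        # [True, False, False, True, True], so only w1/w2 are candidates for MLM masking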
A__ , A__ : List[str] =data_collator.tf_mask_tokens(
batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, )
return batch
A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, )
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, )
A__ : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) )
model.fit(
__snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__snake_case : str = parse_args()
main(args)
| 687 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowerCamelCase :
'''simple docstring'''
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return None
class lowerCamelCase :
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
return None
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCAmelCase_ , """tf""" , 12 , **lowerCAmelCase_ )
@require_torch
@slow
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCAmelCase_ , """pt""" , 12 , **lowerCAmelCase_ )
@require_torch
@slow
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
from transformers import BertModel
A__ : List[str] =["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(lowerCAmelCase_ ) )
vocab_file.flush()
A__ : int =BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
A__ : Optional[int] =BertModel(BertConfig(vocab_size=len(lowerCAmelCase_ ) ) )
model.save_pretrained(lowerCAmelCase_ )
self._test_export(lowerCAmelCase_ , """pt""" , 12 , lowerCAmelCase_ )
@require_tf
@slow
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ : Optional[int] =self._test_export(lowerCAmelCase_ , """tf""" , 12 , **lowerCAmelCase_ )
A__ : Optional[int] =quantize(Path(lowerCAmelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCAmelCase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ : int =self._test_export(lowerCAmelCase_ , """pt""" , 12 , **lowerCAmelCase_ )
A__ : List[str] =quantize(lowerCAmelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCAmelCase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def lowercase__ ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
A__ : List[str] =Path(lowerCAmelCase_ ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
return path
except Exception as e:
self.fail(lowerCAmelCase_ )
@require_torch
@require_tokenizers
@slow
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
from transformers import BertModel
A__ : Optional[int] =BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
A__ : Any =BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowerCAmelCase_ , lowerCAmelCase_ , """pt""" )
@require_tf
@require_tokenizers
@slow
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
from transformers import TFBertModel
A__ : Tuple =TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
A__ : str =BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowerCAmelCase_ , lowerCAmelCase_ , """tf""" )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple ) -> List[str]:
'''simple docstring'''
A__ : Tuple =FeatureExtractionPipeline(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Tuple =["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
A__ , A__ , A__ , A__ : Optional[Any] =infer_shapes(lowerCAmelCase_ , lowerCAmelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCAmelCase_ )
self.assertSequenceEqual(variable_names[3:] , lowerCAmelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : Union[str, Any] =["""input_ids""", """attention_mask""", """token_type_ids"""]
A__ : List[str] ={"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
A__ , A__ : int =ensure_valid_input(FuncContiguousArgs() , lowerCAmelCase_ , lowerCAmelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCAmelCase_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCAmelCase_ ) , set(lowerCAmelCase_ ) )
        # Parameters should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCAmelCase_ , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
A__ , A__ : Optional[int] =ensure_valid_input(FuncNonContiguousArgs() , lowerCAmelCase_ , lowerCAmelCase_ )
        # Should have exactly one arg (everything before the arg not provided, "some_other_args")
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
A__ : Dict =generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case : Union[str, Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__snake_case : Dict = logging.get_logger(__name__)
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
A__ : List[Any] =os.getenv("""SM_HP_MP_PARAMETERS""", """{}""" )
try:
        # Parse it and check that the field "partitions" is included; it is required for model parallel.
A__ : Optional[Any] =json.loads(__snake_case )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
A__ : Optional[Any] =os.getenv("""SM_FRAMEWORK_PARAMS""", """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
A__ : Any =json.loads(__snake_case )
if not mpi_options.get("""sagemaker_mpi_enabled""", __snake_case ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
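# e.g. SM_HP_MP_PARAMETERS='{"partitions": 2}' (hypothetical value) would satisfy the
# "partitions" check above, provided the "sagemaker_mpi_enabled" flag in
# SM_FRAMEWORK_PARAMS and the `smdistributed` module check also pass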
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , lowerCAmelCase_ , )
@cached_property
def lowercase__ ( self : str ) -> "torch.device":
'''simple docstring'''
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
A__ : Union[str, Any] =torch.device("""cpu""" )
A__ : str =0
elif is_sagemaker_model_parallel_available():
A__ : int =smp.local_rank()
A__ : Tuple =torch.device("""cuda""" , lowerCAmelCase_ )
A__ : Any =1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
A__ : Tuple =int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
A__ : Any =torch.device("""cuda""" , self.local_rank )
A__ : Union[str, Any] =1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
A__ : Any =torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
A__ : Union[str, Any] =torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
A__ : Any =torch.device("""cuda""" , self.local_rank )
A__ : str =1
if device.type == "cuda":
torch.cuda.set_device(lowerCAmelCase_ )
return device
@property
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
return False
| 687 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__snake_case : int = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__snake_case : Optional[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__snake_case : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__snake_case : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 687 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
__snake_case = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
__snake_case = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
__snake_case = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
__snake_case = field(default=2 , metadata={'help': 'Batch size for training.'} )
__snake_case = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
__snake_case = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
__snake_case = field(
default=1_0000 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    __snake_case = field(default=2E-4 , metadata={'help': 'Learning rate for training.'} )
    __snake_case = field(default='cosine' , metadata={'help': 'Learning rate schedule type.'} )
__snake_case = field(
default=750 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
__snake_case = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
__snake_case = field(default=5_0000 , metadata={'help': 'Maximum number of training steps.'} )
__snake_case = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
__snake_case = field(default=1024 , metadata={'help': 'Sequence lengths used for training.'} )
__snake_case = field(default=1 , metadata={'help': 'Training seed.'} )
__snake_case = field(
default=1024 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
__snake_case = field(default=lowercase_ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
__snake_case = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
__snake_case = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
__snake_case = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
__snake_case = field(default=1024 , metadata={'help': 'Length of sequences to be evaluated.'} )
__snake_case = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
__snake_case = field(default=lowercase_ , metadata={'help': 'Number of workers used for code evaluation.'} )
__snake_case = field(
        default=lowercase_ , metadata={'help': 'The number of human-eval tasks to run. If not included, all tasks are evaluated.'} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
__snake_case = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
__snake_case = field(default=256 , metadata={'help': 'Maximum number of newly generated tokens.'} )
__snake_case = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
__snake_case = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
__snake_case = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
__snake_case = field(
default=200 , metadata={'help': 'Number of completions to generate for each sample.'} )
__snake_case = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
__snake_case = field(
        default='eval_results.json' , metadata={'help': 'File to save the evaluation results to.'} )
__snake_case = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
__snake_case = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default=lowercase_ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
__snake_case = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
__snake_case = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
__snake_case = field(
default=10_0000 , metadata={'help': 'Number of files to save per JSON output file.'} )
__snake_case = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
__snake_case = field(
default=1000 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
__snake_case = field(
default=100 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
__snake_case = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
__snake_case = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
__snake_case = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
__snake_case = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
__snake_case = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
__snake_case = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
__snake_case = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
__snake_case = field(default=20_0000 , metadata={'help': 'Number of examples to train tokenizer on.'} )
__snake_case = field(
        default=3_2768 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
__snake_case = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
__snake_case = field(default=lowercase_ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
__snake_case = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
__snake_case = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
__snake_case = field(default=lowercase_ , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
__snake_case = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
__snake_case = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
    __snake_case = field(default=lowercase_ , metadata={'help': 'Push saved model to the hub.'} )
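# A minimal sketch of how these dataclasses are typically consumed (assuming
# `transformers.HfArgumentParser`, as in the original codeparrot scripts; the class
# name below is the original one that the repeated `lowerCamelCase` stands in for):
#
#     from transformers import HfArgumentParser
#     parser = HfArgumentParser(TrainingArguments)
#     args = parser.parse_args_into_dataclasses()[0]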
| 687 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str:
"""simple docstring"""
A__ : int =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any =""""""
else:
A__ : Optional[int] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
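    # the fused qkv weight has shape (3 * hidden_size, hidden_size); the three equal
    # slices below are the query, key and value projections, in that order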
A__ : Optional[int] =in_proj_weight[
: config.hidden_size, :
]
A__ : str =in_proj_bias[: config.hidden_size]
A__ : Optional[Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] =in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] =in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =dct.pop(__snake_case )
A__ : Tuple =val
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str:
"""simple docstring"""
A__ : Tuple =ViTConfig()
# patch_size
if model_name[-1] == "8":
A__ : Optional[Any] =8
# set labels if required
if not base_model:
A__ : Optional[Any] =1_000
A__ : str ="""huggingface/label-files"""
A__ : Any ="""imagenet-1k-id2label.json"""
A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : List[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A__ : str =384
A__ : Optional[Any] =1_536
A__ : Optional[Any] =12
A__ : Union[str, Any] =6
# load original model from torch hub
A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : List[str] =original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
if base_model:
A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval()
else:
A__ : List[str] =ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
A__ : Union[str, Any] =ViTImageProcessor()
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Union[str, Any] =encoding["""pixel_values"""]
A__ : Union[str, Any] =model(__snake_case )
if base_model:
A__ : List[str] =original_model(__snake_case )
assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
A__ : Optional[int] =original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__snake_case : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 687 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : Tuple = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
__snake_case : int = {
'RUCAIBox/mvp': 1024,
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['input_ids', 'attention_mask']
__snake_case = MvpTokenizer
def __init__( self : Dict , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]="replace" , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : Dict="</s>" , lowerCAmelCase_ : Tuple="</s>" , lowerCAmelCase_ : Union[str, Any]="<s>" , lowerCAmelCase_ : str="<unk>" , lowerCAmelCase_ : str="<pad>" , lowerCAmelCase_ : List[str]="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Dict=True , **lowerCAmelCase_ : Union[str, Any] , ) -> str:
'''simple docstring'''
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : Any =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_ ) != add_prefix_space:
A__ : int =getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""" ) )
A__ : Dict =add_prefix_space
A__ : int =pre_tok_class(**lowerCAmelCase_ )
A__ : Dict =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A__ : List[Any] ="""post_processor"""
A__ : List[str] =getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ )
if tokenizer_component_instance:
A__ : List[str] =json.loads(tokenizer_component_instance.__getstate__() )
        # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A__ : Union[str, Any] =tuple(state["""sep"""] )
if "cls" in state:
A__ : int =tuple(state["""cls"""] )
A__ : Dict =False
if state.get("""add_prefix_space""" , lowerCAmelCase_ ) != add_prefix_space:
A__ : str =add_prefix_space
A__ : Union[str, Any] =True
if state.get("""trim_offsets""" , lowerCAmelCase_ ) != trim_offsets:
A__ : Optional[int] =trim_offsets
A__ : List[Any] =True
if changes_to_apply:
A__ : Dict =getattr(lowerCAmelCase_ , state.pop("""type""" ) )
A__ : Dict =component_class(**lowerCAmelCase_ )
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ )
@property
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self : Dict , lowerCAmelCase_ : Optional[Any] ) -> int:
'''simple docstring'''
A__ : List[str] =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else value
A__ : Any =value
def lowercase__ ( self : Dict , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : Dict ) -> BatchEncoding:
'''simple docstring'''
A__ : List[Any] =kwargs.get("""is_split_into_words""" , lowerCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : List[Any] ) -> BatchEncoding:
'''simple docstring'''
A__ : List[str] =kwargs.get("""is_split_into_words""" , lowerCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
A__ : Union[str, Any] =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=None ) -> Dict:
'''simple docstring'''
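        # follows the BART input format: a single sequence becomes `<s> A </s>`, while a
        # pair becomes `<s> A </s></s> B </s>`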
A__ : Any =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Optional[Any] =[self.sep_token_id]
A__ : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 687 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(__snake_case : int, __snake_case : Dict ):
def rule_func(__snake_case : int ) -> float:
A__ : Any =sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =SchedulerType(__snake_case )
A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__snake_case, last_epoch=__snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, )
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int ) -> int:
"""simple docstring"""
    if not isinstance(__snake_case, int ):
        raise TypeError("""Input value must be a 'int' type""" )
    elif __snake_case < 0:
        raise ValueError("""Input value must be a positive integer""" )
return bin(__snake_case ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
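# The import structure below is resolved lazily: submodules are only imported on first
# attribute access, and optional backends (tokenizers, torch) are probed so that the
# corresponding exports are simply skipped when a dependency is missing.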
__snake_case : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
__snake_case : str = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
__snake_case : Dict = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
__snake_case : int = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : List[str], __snake_case : bool, __snake_case : Optional[Dict[int, int]] = None, __snake_case : bool = False, ) -> str:
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
A__ : List[Any] =new_id
# turn into Numpy arrays
A__ : Any =np.array(__snake_case )
A__ : Optional[Any] =np.array(__snake_case )
if reduce_labels:
A__ : Dict =255
A__ : Optional[int] =label - 1
A__ : Optional[Any] =255
A__ : int =label != ignore_index
A__ : List[Any] =np.not_equal(__snake_case, __snake_case )
A__ : Union[str, Any] =pred_label[mask]
A__ : Optional[int] =np.array(__snake_case )[mask]
A__ : List[Any] =pred_label[pred_label == label]
A__ : Optional[int] =np.histogram(__snake_case, bins=__snake_case, range=(0, num_labels - 1) )[0]
A__ : Optional[Any] =np.histogram(__snake_case, bins=__snake_case, range=(0, num_labels - 1) )[0]
A__ : str =np.histogram(__snake_case, bins=__snake_case, range=(0, num_labels - 1) )[0]
A__ : List[str] =area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Optional[Any], __snake_case : Tuple, __snake_case : bool, __snake_case : Optional[Dict[int, int]] = None, __snake_case : bool = False, ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =np.zeros((num_labels,), dtype=np.floataa )
A__ : Dict =np.zeros((num_labels,), dtype=np.floataa )
A__ : List[str] =np.zeros((num_labels,), dtype=np.floataa )
A__ : List[str] =np.zeros((num_labels,), dtype=np.floataa )
for result, gt_seg_map in zip(__snake_case, __snake_case ):
A__ , A__ , A__ , A__ : Union[str, Any] =intersect_and_union(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Tuple, __snake_case : Tuple, __snake_case : bool, __snake_case : Optional[int] = None, __snake_case : Optional[Dict[int, int]] = None, __snake_case : bool = False, ) -> Tuple:
"""simple docstring"""
A__ , A__ , A__ , A__ : List[str] =total_intersect_and_union(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
# compute metrics
A__ : Union[str, Any] ={}
A__ : Union[str, Any] =total_area_intersect.sum() / total_area_label.sum()
A__ : List[Any] =total_area_intersect / total_area_union
A__ : List[str] =total_area_intersect / total_area_label
A__ : Any =np.nanmean(__snake_case )
A__ : Optional[Any] =np.nanmean(__snake_case )
A__ : Union[str, Any] =all_acc
A__ : Tuple =iou
A__ : Tuple =acc
if nan_to_num is not None:
A__ : List[str] ={metric: np.nan_to_num(__snake_case, nan=__snake_case ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : bool , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Dict[int, int]] = None , lowerCAmelCase_ : bool = False , ) -> str:
'''simple docstring'''
A__ : Any =mean_iou(
results=lowerCAmelCase_ , gt_seg_maps=lowerCAmelCase_ , num_labels=lowerCAmelCase_ , ignore_index=lowerCAmelCase_ , nan_to_num=lowerCAmelCase_ , label_map=lowerCAmelCase_ , reduce_labels=lowerCAmelCase_ , )
return iou_result
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'layoutlmv3'
def __init__( self : str , lowerCAmelCase_ : Optional[Any]=5_02_65 , lowerCAmelCase_ : List[str]=7_68 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : Optional[Any]=12 , lowerCAmelCase_ : Tuple=30_72 , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Optional[int]=5_12 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Union[str, Any]=1e-5 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=0 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : str=10_24 , lowerCAmelCase_ : List[Any]=1_28 , lowerCAmelCase_ : Optional[int]=1_28 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Any=32 , lowerCAmelCase_ : Union[str, Any]=1_28 , lowerCAmelCase_ : Union[str, Any]=64 , lowerCAmelCase_ : Tuple=2_56 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Dict=2_24 , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : Tuple=16 , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Optional[int] , ) -> List[Any]:
'''simple docstring'''
super().__init__(
vocab_size=lowerCAmelCase_ , hidden_size=lowerCAmelCase_ , num_hidden_layers=lowerCAmelCase_ , num_attention_heads=lowerCAmelCase_ , intermediate_size=lowerCAmelCase_ , hidden_act=lowerCAmelCase_ , hidden_dropout_prob=lowerCAmelCase_ , attention_probs_dropout_prob=lowerCAmelCase_ , max_position_embeddings=lowerCAmelCase_ , type_vocab_size=lowerCAmelCase_ , initializer_range=lowerCAmelCase_ , layer_norm_eps=lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : List[str] =max_ad_position_embeddings
A__ : Dict =coordinate_size
A__ : List[Any] =shape_size
A__ : Dict =has_relative_attention_bias
A__ : Tuple =rel_pos_bins
A__ : List[Any] =max_rel_pos
A__ : List[str] =has_spatial_attention_bias
A__ : Any =rel_ad_pos_bins
A__ : str =max_rel_ad_pos
A__ : str =text_embed
A__ : List[Any] =visual_embed
A__ : Any =input_size
A__ : Optional[int] =num_channels
A__ : Union[str, Any] =patch_size
A__ : int =classifier_dropout
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = version.parse('1.12' )
@property
def lowercase__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return 12
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : "ProcessorMixin" , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional["TensorType"] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 40 , lowerCAmelCase_ : int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
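        # OCR is switched off so the processor uses the hand-crafted dummy words and
        # bounding boxes built below instead of extracting them from the dummy images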
setattr(processor.image_processor , """apply_ocr""" , lowerCAmelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A__ : Optional[int] =compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A__ : Optional[Any] =processor.tokenizer.num_special_tokens_to_add(lowerCAmelCase_ )
A__ : Optional[int] =compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_ )
# Generate dummy inputs according to compute batch and sequence
A__ : List[Any] =[[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
A__ : str =[[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
A__ : Optional[Any] =self._generate_dummy_images(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =dict(
processor(
lowerCAmelCase_ , text=lowerCAmelCase_ , boxes=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , ) )
return inputs
| 687 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting"""
A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Optional[Any] =jax.random.PRNGKey(0 )
A__ : List[str] =50
A__ : List[str] =jax.device_count()
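        # one prompt/image/mask per device; inputs are replicated and sharded below so
        # the jit-compiled pipeline runs data-parallel across all available devices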
A__ : List[str] =num_samples * [prompt]
A__ : List[str] =num_samples * [init_image]
A__ : Tuple =num_samples * [mask_image]
A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# shard inputs and rng
A__ : Dict =replicate(lowerCAmelCase_ )
A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() )
A__ : List[Any] =shard(lowerCAmelCase_ )
A__ : Union[str, Any] =shard(lowerCAmelCase_ )
A__ : str =shard(lowerCAmelCase_ )
A__ : List[str] =pipeline(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ )
A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 )
A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1]
A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ : Optional[int] =jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 687 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : Dict = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = BartphoTokenizer
__snake_case = False
__snake_case = True
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
super().setUp()
A__ : int =["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
A__ : int =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : int ={"""unk_token""": """<unk>"""}
A__ : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n" )
A__ : Optional[int] =BartphoTokenizer(lowerCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple , **lowerCAmelCase_ : Optional[Any] ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : int , lowerCAmelCase_ : List[str] ) -> Any:
'''simple docstring'''
A__ : Optional[Any] ="""This is a là test"""
A__ : List[Any] ="""This is a<unk><unk> test"""
return input_text, output_text
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
A__ : Union[str, Any] =BartphoTokenizer(lowerCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
A__ : Union[str, Any] ="""This is a là test"""
A__ : Optional[Any] ="""▁This ▁is ▁a ▁l à ▁t est""".split()
A__ : List[Any] =tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Optional[Any] =tokens + [tokenizer.unk_token]
A__ : Tuple =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
| 687 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'conditional_detr'
__snake_case = ['past_key_values']
__snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Tuple =backbone_config.get("""model_type""" )
A__ : List[str] =CONFIG_MAPPING[backbone_model_type]
A__ : Dict =config_class.from_dict(lowerCAmelCase_ )
A__ : int =use_timm_backbone
A__ : List[Any] =backbone_config
A__ : Optional[int] =num_channels
A__ : Optional[int] =num_queries
A__ : Union[str, Any] =d_model
A__ : Optional[int] =encoder_ffn_dim
A__ : Optional[Any] =encoder_layers
A__ : int =encoder_attention_heads
A__ : Optional[Any] =decoder_ffn_dim
A__ : Tuple =decoder_layers
A__ : Optional[Any] =decoder_attention_heads
A__ : Tuple =dropout
A__ : int =attention_dropout
A__ : Dict =activation_dropout
A__ : Union[str, Any] =activation_function
A__ : List[str] =init_std
A__ : str =init_xavier_std
A__ : int =encoder_layerdrop
A__ : List[Any] =decoder_layerdrop
A__ : Tuple =encoder_layers
A__ : Tuple =auxiliary_loss
A__ : List[Any] =position_embedding_type
A__ : int =backbone
A__ : Optional[int] =use_pretrained_backbone
A__ : str =dilation
# Hungarian matcher
A__ : Any =class_cost
A__ : str =bbox_cost
A__ : str =giou_cost
# Loss coefficients
A__ : Union[str, Any] =mask_loss_coefficient
A__ : int =dice_loss_coefficient
A__ : Union[str, Any] =cls_loss_coefficient
A__ : List[str] =bbox_loss_coefficient
A__ : str =giou_loss_coefficient
A__ : Optional[Any] =focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return self.d_model
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : str =self.backbone_config.to_dict()
A__ : int =self.__class__.model_type
return output
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = version.parse('1.11' )
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return 12
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int = 10**12 ) -> int:
"""simple docstring"""
A__ : int =1
A__ : Any =0
A__ : str =1
A__ : Optional[int] =1
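    # successive exact solutions obey a Pell-style recurrence, so each loop iteration
    # jumps straight from one (numerator, denominator) solution pair to the next until
    # the total reaches `min_total`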
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F"""{solution() = }""")
| 687 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 'bit'
__snake_case = ['preactivation', 'bottleneck']
__snake_case = ['SAME', 'VALID']
def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ : List[Any] =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
A__ : List[Any] =num_channels
A__ : Tuple =embedding_size
A__ : Union[str, Any] =hidden_sizes
A__ : List[str] =depths
A__ : Optional[Any] =layer_type
A__ : int =hidden_act
A__ : int =global_padding
A__ : int =num_groups
A__ : str =drop_path_rate
A__ : str =embedding_dynamic_padding
A__ : Dict =output_stride
A__ : Optional[int] =width_factor
A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : float ) -> float:
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( __snake_case : float, __snake_case : float ) -> float:
"""simple docstring"""
if equation(__snake_case ) * equation(__snake_case ) >= 0:
raise ValueError("""Wrong space!""" )
A__ : int =a
while (b - a) >= 0.01:
# Find middle point
A__ : Optional[int] =(a + b) / 2
# Check if middle point is root
if equation(__snake_case ) == 0.0:
break
# Decide the side to repeat the steps
if equation(__snake_case ) * equation(__snake_case ) < 0:
A__ : List[Any] =c
else:
A__ : Optional[int] =c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 687 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__snake_case : List[str] = 5_0003
__snake_case : Dict = 5_0002
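# ids of the "__en_XX__" and "__python__" language-code tokens in the base vocabulary;
# PLBart appends the relevant code as a suffix token rather than a prefix (see the
# suffix_tokens assertions below)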
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PLBartTokenizer
__snake_case = None
__snake_case = False
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )]
self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ )
A__ : Dict =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Tuple =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'uclanlp/plbart-python-en_XX'
__snake_case = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__snake_case = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__snake_case = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : Optional[int] ) -> str:
'''simple docstring'''
A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A__ : Optional[Any] =1
return cls
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
A__ : str =10
A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ : Tuple =tempfile.mkdtemp()
A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
A__ : Optional[int] =self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[Any] =targets["""input_ids"""]
A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
| 687 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : MutableSequence[float] ) -> None:
'''simple docstring'''
if len(lowerCAmelCase_ ) != degree + 1:
raise ValueError(
"""The number of coefficients should be equal to the degree + 1.""" )
A__ : list[float] =list(lowerCAmelCase_ )
A__ : Any =degree
def __add__( self : List[str] , lowerCAmelCase_ : Polynomial ) -> Polynomial:
'''simple docstring'''
if self.degree > polynomial_a.degree:
A__ : Optional[Any] =self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , lowerCAmelCase_ )
else:
A__ : str =polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , lowerCAmelCase_ )
def __sub__( self : List[Any] , lowerCAmelCase_ : Polynomial ) -> Polynomial:
'''simple docstring'''
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Optional[Any] ) -> Polynomial:
'''simple docstring'''
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Dict , lowerCAmelCase_ : Polynomial ) -> Polynomial:
'''simple docstring'''
A__ : list[float] =[0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , lowerCAmelCase_ )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : int | float ) -> int | float:
'''simple docstring'''
A__ : int | float =0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Any ) -> str:
'''simple docstring'''
A__ : Dict =""""""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowerCAmelCase_ )
return polynomial
def __repr__( self : Optional[Any] ) -> str:
'''simple docstring'''
return self.__str__()
def lowercase__ ( self : List[str] ) -> Polynomial:
'''simple docstring'''
A__ : list[float] =[0] * self.degree
for i in range(self.degree ):
A__ : str =self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : int | float = 0 ) -> Polynomial:
'''simple docstring'''
A__ : list[float] =[0] * (self.degree + 2)
A__ : Any =constant
for i in range(self.degree + 1 ):
A__ : List[str] =self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , lowerCAmelCase_ )
def __eq__( self : Union[str, Any] , lowerCAmelCase_ : object ) -> bool:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : str , lowerCAmelCase_ : object ) -> bool:
'''simple docstring'''
return not self.__eq__(lowerCAmelCase_ )
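# --- Illustrative sketch, not part of the original module ---
# __mul__ above multiplies polynomials by convolving their coefficient lists.
# A standalone version of that kernel, assuming coefficients are ordered from
# the constant term upward (as this class stores them):
def _convolve_coefficients(a: list[float], b: list[float]) -> list[float]:
    # out[k] accumulates every product a[i] * b[j] with i + j == k
    out = [0.0] * (len(a) + len(b) - 1)
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            out[i + j] += ai * bj
    return out


# (1 + 2x) * (3 + x) = 3 + 7x + 2x^2
assert _convolve_coefficients([1.0, 2.0], [3.0, 1.0]) == [3.0, 7.0, 2.0]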
| 687 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =generator.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[str] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 687 | 1 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__snake_case : str = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__snake_case : int = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__snake_case : Any = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__snake_case : int = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__snake_case : str = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__snake_case : Union[str, Any] = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__snake_case : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A__ , A__ : List[str] =randrange(len(__snake_case ) ), randrange(len(__snake_case ) )
A__ : Optional[Any] =["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
A__ , A__ : Any =SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowerCamelCase ( __snake_case : int = 100 ) -> Optional[int]:
"""simple docstring"""
return (generate_random_hand() for _ in range(__snake_case ))
@pytest.mark.parametrize("""hand, expected""", __snake_case )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
assert PokerHand(__snake_case )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""", __snake_case )
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int ) -> Optional[Any]:
"""simple docstring"""
assert PokerHand(__snake_case )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""", __snake_case )
def __lowerCamelCase ( __snake_case : Dict, __snake_case : List[str], __snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
A__ : Any =PokerHand(__snake_case )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""", __snake_case )
def __lowerCamelCase ( __snake_case : List[str], __snake_case : Dict ) -> List[str]:
"""simple docstring"""
assert PokerHand(__snake_case )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""", __snake_case )
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
assert PokerHand(__snake_case )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""", __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : Tuple, __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
assert PokerHand(__snake_case ).compare_with(PokerHand(__snake_case ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""", generate_random_hands() )
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Union[str, Any], __snake_case : str ) -> Optional[int]:
"""simple docstring"""
assert PokerHand(__snake_case ).compare_with(PokerHand(__snake_case ) ) == expected
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : int =[PokerHand(__snake_case ) for hand in SORTED_HANDS]
A__ : Union[str, Any] =poker_hands.copy()
shuffle(__snake_case )
A__ : Optional[Any] =chain(sorted(__snake_case ) )
for index, hand in enumerate(__snake_case ):
assert hand == poker_hands[index]
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
A__ : List[Any] =[PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=__snake_case )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =PokerHand("""2C 4S AS 3D 5C""" )
A__ : List[Any] =True
A__ : Any =[5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] =0
A__ : List[Any] =os.path.abspath(os.path.dirname(__snake_case ) )
A__ : Tuple =os.path.join(__snake_case, """poker_hands.txt""" )
with open(__snake_case ) as file_hand:
for line in file_hand:
A__ : Union[str, Any] =line[:14].strip()
A__ : List[str] =line[15:].strip()
A__ , A__ : Any =PokerHand(__snake_case ), PokerHand(__snake_case )
A__ : Tuple =player.compare_with(__snake_case )
if output == "Win":
answer += 1
assert answer == 376
| 687 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
A__ : Optional[Any] =Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels
        A__ : Union[str, Any] =nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ )
        A__ : Tuple =nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
# pass init params to Decoder
A__ : Optional[Any] =Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , )
@apply_forward_hook
def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
A__ : Dict =self.encoder(lowerCAmelCase_ )
A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase_ )
@apply_forward_hook
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
# also go through quantization layer
if not force_not_quantize:
A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ )
else:
A__ : List[str] =h
A__ : Dict =self.post_quant_conv(lowerCAmelCase_ )
A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
A__ : Optional[int] =sample
A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents
A__ : Tuple =self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
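# Data-flow note (illustrative): the forward pass above runs the full VQ round
# trip -- encoder -> quant_conv -> (inside decode) quantize -> post_quant_conv
# -> decoder -- so the returned sample is a reconstruction of the input image.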
| 687 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : int = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def __lowerCamelCase ( __snake_case : str, __snake_case : str ) -> List[Any]:
"""simple docstring"""
A__ : Union[str, Any] ={
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1_024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1_024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1E-5,
"""token_type_vocab_size""": 2,
}
A__ : List[Any] =bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
A__ : Tuple =BERTEncoder(
attention_cell=predefined_args["""attention_cell"""], num_layers=predefined_args["""num_layers"""], units=predefined_args["""units"""], hidden_size=predefined_args["""hidden_size"""], max_length=predefined_args["""max_length"""], num_heads=predefined_args["""num_heads"""], scaled=predefined_args["""scaled"""], dropout=predefined_args["""dropout"""], output_attention=__snake_case, output_all_encodings=__snake_case, use_residual=predefined_args["""use_residual"""], activation=predefined_args.get("""activation""", """gelu""" ), layer_norm_eps=predefined_args.get("""layer_norm_eps""", __snake_case ), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
A__ : Optional[Any] ="""openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
A__ : List[Any] =os.path.join(get_home_dir(), """models""" )
A__ : List[str] =_load_vocab(__snake_case, __snake_case, __snake_case, cls=__snake_case )
A__ : Dict =nlp.model.BERTModel(
__snake_case, len(__snake_case ), units=predefined_args["""units"""], embed_size=predefined_args["""embed_size"""], embed_dropout=predefined_args["""embed_dropout"""], word_embed=predefined_args["""word_embed"""], use_pooler=__snake_case, use_token_type_embed=__snake_case, token_type_vocab_size=predefined_args["""token_type_vocab_size"""], use_classifier=__snake_case, use_decoder=__snake_case, )
original_bort.load_parameters(__snake_case, cast_dtype=__snake_case, ignore_extra=__snake_case )
A__ : Union[str, Any] =original_bort._collect_params_with_prefix()
# Build our config 🤗
A__ : Any ={
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(__snake_case ),
}
A__ : Optional[int] =BertConfig.from_dict(__snake_case )
A__ : int =BertForMaskedLM(__snake_case )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__snake_case : Dict ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__snake_case : str, __snake_case : Tuple ):
A__ : Tuple =hf_param.shape
A__ : Tuple =to_torch(params[gluon_param] )
A__ : Optional[Any] =gluon_param.shape
assert (
shape_hf == shape_gluon
), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
A__ : Any =check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight, """word_embed.0.weight""" )
A__ : Optional[Any] =check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight, """encoder.position_weight""" )
A__ : int =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias, """encoder.layer_norm.beta""" )
A__ : Union[str, Any] =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight, """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
A__ : List[Any] =torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
A__ : BertLayer =hf_bort_model.bert.encoder.layer[i]
# self attention
A__ : BertSelfAttention =layer.attention.self
A__ : Union[str, Any] =check_and_map_params(
self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
A__ : str =check_and_map_params(
self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
A__ : List[str] =check_and_map_params(
self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
A__ : Any =check_and_map_params(
self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
A__ : str =check_and_map_params(
self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
A__ : int =check_and_map_params(
self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
A__ : BertSelfOutput =layer.attention.output
A__ : List[Any] =check_and_map_params(
self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias" )
A__ : str =check_and_map_params(
self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight" )
A__ : Dict =check_and_map_params(
self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta" )
A__ : str =check_and_map_params(
self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
A__ : BertIntermediate =layer.intermediate
A__ : List[str] =check_and_map_params(
intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
A__ : List[str] =check_and_map_params(
intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
A__ : BertOutput =layer.output
A__ : Union[str, Any] =check_and_map_params(
bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
A__ : List[str] =check_and_map_params(
bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
A__ : List[str] =check_and_map_params(
bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
A__ : List[str] =check_and_map_params(
bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
A__ : List[Any] =RobertaTokenizer.from_pretrained("""roberta-base""" )
A__ : str =tokenizer.encode_plus(__snake_case )["""input_ids"""]
# Get gluon output
A__ : List[str] =mx.nd.array([input_ids] )
A__ : Optional[int] =original_bort(inputs=__snake_case, token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(__snake_case )
A__ : Union[str, Any] =BertModel.from_pretrained(__snake_case )
hf_bort_model.eval()
A__ : Any =tokenizer.encode_plus(__snake_case, return_tensors="""pt""" )
A__ : List[Any] =hf_bort_model(**__snake_case )[0]
A__ : Union[str, Any] =output_gluon[0].asnumpy()
A__ : List[str] =output_hf[0].detach().numpy()
A__ : int =np.max(np.abs(hf_layer - gluon_layer ) ).item()
A__ : Optional[int] =np.allclose(__snake_case, __snake_case, atol=1E-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""", __snake_case )
if __name__ == "__main__":
__snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__snake_case : Optional[int] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 687 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Tuple = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__snake_case : str = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__snake_case : List[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =set()
A__ : Optional[int] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ : str =char
A__ : List[Any] =set(__snake_case )
return pairs
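# Worked example (illustrative, not in the original file): for the symbol
# tuple ("l", "o", "w", "e", "r</w>"), the pair-extraction helper above
# returns {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")} -- the
# candidate bigrams that the BPE merge loop below ranks via self.bpe_ranks.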
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : int =vocab_file
A__ : Any =merges_file
A__ : Union[str, Any] ={}
A__ : Optional[int] =0
A__ : List[Any] =1
A__ : Tuple =2
A__ : Dict =3
self.add_from_file(lowerCAmelCase_ )
A__ : List[str] ={v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
A__ : str =merges_handle.read().split("""\n""" )[:-1]
A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges]
A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Dict ={}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : Dict =[self.cls_token_id]
A__ : Union[str, Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[self.sep_token_id]
A__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ : int =tuple(lowerCAmelCase_ )
A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A__ : Tuple =get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : Tuple =bigram
A__ : Optional[int] =[]
A__ : Tuple =0
while i < len(lowerCAmelCase_ ):
try:
A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Union[str, Any] =j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Dict =tuple(lowerCAmelCase_ )
A__ : Dict =new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
A__ : str =get_pairs(lowerCAmelCase_ )
A__ : Dict ="""@@ """.join(lowerCAmelCase_ )
A__ : Tuple =word[:-4]
A__ : Any =word
return word
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
A__ : int =[]
A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Optional[Any] =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.merges_file , lowerCAmelCase_ )
return out_vocab_file, out_merge_file
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
A__ : Union[str, Any] =f.readlines()
for lineTmp in lines:
A__ : List[Any] =lineTmp.strip()
A__ : Dict =line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
A__ : Tuple =line[:idx]
A__ : Tuple =len(self.encoder )
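# Format note (illustrative): add_from_file expects fairseq-dictionary lines of
# the form "<token> <count>", e.g. "xin_chao 1250". Everything before the last
# space is taken as the token -- in the original helper it receives the next
# free id in self.encoder -- and the trailing count is ignored.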
| 687 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : Dict , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 1_00 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
A__ : Any =self.unet.config.sample_size / self.unet.config.sample_rate
A__ : Tuple =audio_length_in_s * self.unet.config.sample_rate
A__ : Dict =2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
f" {3 * down_scale_factor / self.unet.config.sample_rate}." )
A__ : Any =int(lowerCAmelCase_ )
if sample_size % down_scale_factor != 0:
A__ : Optional[Any] =(
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
""" process.""" )
A__ : int =int(lowerCAmelCase_ )
A__ : Tuple =next(iter(self.unet.parameters() ) ).dtype
A__ : List[Any] =(batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
A__ : List[Any] =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase_ , device=audio.device )
A__ : List[Any] =self.scheduler.timesteps.to(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A__ : Optional[Any] =self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
            # 2. compute previous image: x_t -> x_t-1
A__ : Optional[int] =self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
A__ : List[str] =audio.clamp(-1 , 1 ).float().cpu().numpy()
A__ : Optional[Any] =audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCAmelCase_ )
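# Usage sketch (assumptions: the class above corresponds to diffusers'
# DanceDiffusionPipeline, and "harmonai/maestro-150k" is a compatible
# unconditional audio checkpoint):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]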
| 687 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int:
"""simple docstring"""
A__ : Union[str, Any] =nn.functional.normalize(__snake_case )
A__ : Optional[Any] =nn.functional.normalize(__snake_case )
return torch.mm(__snake_case, normalized_text_embeds.t() )
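# Shape note (illustrative): with image_embeds of shape (batch, dim) and
# normalized_text_embeds of shape (n_concepts, dim), the matmul above yields a
# (batch, n_concepts) matrix of cosine similarities, because both inputs are
# L2-normalized before the product.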
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = CLIPConfig
__snake_case = ['CLIPEncoderLayer']
def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase_ )
A__ : str =CLIPVisionModel(config.vision_config )
A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ )
A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ )
A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ )
@torch.no_grad()
def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any:
'''simple docstring'''
A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : Any =self.visual_projection(lowerCAmelCase_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy()
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy()
A__ : List[str] =[]
A__ : Optional[int] =image_embeds.shape[0]
for i in range(lowerCAmelCase_ ):
A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : List[Any] =0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ : Optional[Any] =special_cos_dist[i][concept_idx]
A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item()
A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
A__ : Dict =0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ : Optional[int] =cos_dist[i][concept_idx]
A__ : List[str] =self.concept_embeds_weights[concept_idx].item()
A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase_ )
result.append(lowerCAmelCase_ )
A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : List[Any] =self.visual_projection(lowerCAmelCase_ )
A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds )
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : Dict =0.0
A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 )
A__ : Tuple =special_care * 0.01
A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 687 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class lowerCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__snake_case = None
class lowerCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__snake_case = PandasConfig
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
A__ : Optional[int] =dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase_ , (str, list, tuple) ):
A__ : Any =data_files
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Any =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A__ : int =[dl_manager.iter_files(lowerCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A__ : List[str] =[]
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Optional[Any] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A__ : Optional[int] =[dl_manager.iter_files(lowerCAmelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase_ , gen_kwargs={"""files""": files} ) )
return splits
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ : List[str] =table_cast(lowerCAmelCase_ , self.config.features.arrow_schema )
return pa_table
def lowercase__ ( self : Tuple , lowerCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase_ ) ):
with open(lowerCAmelCase_ , """rb""" ) as f:
A__ : Optional[Any] =pa.Table.from_pandas(pd.read_pickle(lowerCAmelCase_ ) )
yield i, self._cast_table(lowerCAmelCase_ )
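# Note (illustrative): each data file is expected to hold a pickled
# pandas.DataFrame; the generator above loads it with pd.read_pickle and
# converts it to an Arrow table via pa.Table.from_pandas before the optional
# schema cast.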
| 687 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =[]
for part_id in partition_order:
A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
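# Illustrative note: for df = spark.range(10).repartition(2) and
# partition_order = [1, 0], the helper above yields ids "1_0", "1_1", ...
# followed by "0_0", "0_1", ... -- i.e. rows in the requested partition order,
# which the tests below compare against SparkExamplesIterable output.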
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : str =spark.range(100 ).repartition(1 )
A__ : List[str] =Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Tuple =spark.range(10 ).repartition(2 )
A__ : List[str] =[1, 0]
A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions.
A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(10 ).repartition(1 )
A__ : List[str] =SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
A__ : Tuple =lambda __snake_case : x.reverse()
A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] )
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Any =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : List[str] =spark.range(100 ).repartition(1 )
A__ : List[Any] =Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 687 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
__snake_case = 42
def __init__( self : Tuple , lowerCAmelCase_ : UNetaDModel , lowerCAmelCase_ : KarrasVeScheduler ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : Dict , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 50 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : int , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
A__ : Tuple =self.unet.config.sample_size
A__ : str =(batch_size, 3, img_size, img_size)
A__ : Union[str, Any] =self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
A__ : Dict =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
A__ : Optional[int] =self.scheduler.schedule[t]
A__ : Optional[int] =self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
A__ , A__ : Union[str, Any] =self.scheduler.add_noise_to_input(lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
A__ : Optional[Any] =(sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
A__ : Any =self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
A__ : List[str] =(sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
A__ : List[Any] =self.scheduler.step_correct(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , step_output.prev_sample , step_output["""derivative"""] , )
A__ : Dict =step_output.prev_sample
A__ : Union[str, Any] =(sample / 2 + 0.5).clamp(0 , 1 )
A__ : str =sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A__ : Optional[int] =self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
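# Usage sketch (assumptions: the class above corresponds to diffusers'
# KarrasVePipeline, and "google/ncsnpp-celebahq-256" is a compatible
# UNet2DModel checkpoint):
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")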
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
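# Illustrative note (not part of the original file): `_LazyModule` defers the
# torch-dependent imports listed above until an attribute is first accessed,
# so importing this package stays cheap; touching e.g.
# `TrajectoryTransformerConfig` is what actually loads its submodule.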
| 687 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
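# Minimal usage sketch (not part of the original file; the checkpoint id and
# input image are assumptions):
# from PIL import Image
# from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
# model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b")
# inputs = processor(images=Image.open("photo.jpg"), text="What is unusual about this image?", return_tensors="pt")
# generated_ids = model.generate(**inputs)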
| 687 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
super().__init__()
A__ : Union[str, Any] =module
A__ : Union[str, Any] =nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , )
A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ )
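# The module defined above is a LoRA-style bottleneck adapter: the wrapped call
# returns module(x) + B(A(x)), where A maps in_features -> rank and B maps
# rank -> out_features. B is zero-initialized, so the wrapper is an exact no-op
# at initialization. Illustrative shape check (the class is instantiated as
# `LoRALayer` later in this file; not part of the original):
# layer = LoRALayer(nn.Linear(768, 768), rank=16)
# assert layer(torch.randn(2, 768)).shape == (2, 768)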
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'bigscience/bloom-1b7'
# Constant values
__snake_case = 2.109659552692574
__snake_case = 'Hello my name is'
__snake_case = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__snake_case = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
        with self.assertRaises(lowerCAmelCase_ ):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa )
        with self.assertRaises(lowerCAmelCase_ ):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0""" ) )
        with self.assertRaises(lowerCAmelCase_ ):
            # Tries casting with `float()`
            self.model_abit.float()
        with self.assertRaises(lowerCAmelCase_ ):
            # Tries casting with `half()`
            self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt2-xl'
__snake_case = 3.3191854854152187
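# Sketch (not part of the original file) of how the relative-difference
# constant above can be reproduced, written against the upstream transformers
# API names (this transformed source mangles them, e.g. `load_in_abit`);
# assumes a CUDA device with enough memory:
# fp16 = AutoModelForCausalLM.from_pretrained("gpt2-xl", torch_dtype=torch.float16, device_map="auto")
# int8 = AutoModelForCausalLM.from_pretrained("gpt2-xl", load_in_8bit=True, device_map="auto")
# print(fp16.get_memory_footprint() / int8.get_memory_footprint())  # expected ~3.319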
| 687 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Any = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
A__ : Optional[int] =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
A__ : Optional[Any] =[(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Tuple =""""""
else:
A__ : Union[str, Any] ="""deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : int =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A__ : List[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Dict =in_proj_weight[
: config.hidden_size, :
]
A__ : Tuple =in_proj_bias[: config.hidden_size]
A__ : Tuple =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Optional[Any] =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[str] =in_proj_weight[
-config.hidden_size :, :
]
A__ : List[str] =in_proj_bias[-config.hidden_size :]
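# Shape sketch for the q/k/v split above: timm stores one fused projection of
# shape (3 * hidden_size, hidden_size); rows [0 : h] hold the query weights,
# [h : 2h] the keys and [2h : 3h] the values, and the fused bias of length
# 3 * hidden_size is sliced the same way.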
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : int ) -> List[Any]:
"""simple docstring"""
A__ : str =dct.pop(__snake_case )
A__ : Optional[int] =val
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : Union[str, Any] ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Any =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : int, __snake_case : Any ) -> str:
"""simple docstring"""
A__ : str =DeiTConfig()
# all deit models have fine-tuned heads
A__ : str =False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
A__ : Dict =1_000
A__ : str ="""huggingface/label-files"""
A__ : Tuple ="""imagenet-1k-id2label.json"""
A__ : str =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : Optional[int] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : Tuple =idalabel
A__ : Optional[int] ={v: k for k, v in idalabel.items()}
A__ : Optional[int] =int(deit_name[-6:-4] )
A__ : Any =int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
A__ : str =192
A__ : Any =768
A__ : Any =12
A__ : str =3
elif deit_name[9:].startswith("""small""" ):
A__ : List[Any] =384
A__ : Tuple =1_536
A__ : str =12
A__ : Optional[int] =6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
A__ : Union[str, Any] =1_024
A__ : List[str] =4_096
A__ : Tuple =24
A__ : str =16
# load original model from timm
A__ : List[str] =timm.create_model(__snake_case, pretrained=__snake_case )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : int =timm_model.state_dict()
A__ : Union[str, Any] =create_rename_keys(__snake_case, __snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
A__ : str =DeiTForImageClassificationWithTeacher(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by DeiTImageProcessor
A__ : Optional[Any] =int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
A__ : List[Any] =DeiTImageProcessor(size=__snake_case, crop_size=config.image_size )
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Dict =encoding["""pixel_values"""]
A__ : Any =model(__snake_case )
A__ : Tuple =timm_model(__snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__snake_case : Union[str, Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
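# Example invocation (not part of the original file; the script filename and
# output path are placeholders):
# python convert_deit_timm_to_pytorch.py \
#   --deit_name vit_deit_base_distilled_patch16_224 \
#   --pytorch_dump_folder_path ./deit-base-distilled-patch16-224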
| 687 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> None:
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
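# Migration sketch (not part of the original file): the warning above points to
# the image processor; the checkpoint id is an assumption:
# from transformers import YolosImageProcessor
# image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")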
| 687 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__snake_case : Any = re.compile(r'\s+')
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> Any:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__snake_case, """""", example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Dict =[len(__snake_case ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(__snake_case ), "line_max": max(__snake_case )}
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Any:
"""simple docstring"""
A__ : int =np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def __lowerCamelCase ( __snake_case : int, __snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Tuple=5 ) -> Optional[Any]:
"""simple docstring"""
A__ : Dict =["""auto-generated""", """autogenerated""", """automatically generated"""]
A__ : Optional[Any] =example["""content"""].splitlines()
for _, line in zip(range(__snake_case ), __snake_case ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any]=5, __snake_case : int=0.05 ) -> Optional[Any]:
"""simple docstring"""
A__ : str =["""unit tests""", """test file""", """configuration file"""]
A__ : Optional[int] =example["""content"""].splitlines()
A__ : List[str] =0
A__ : int =0
# first test
for _, line in zip(range(__snake_case ), __snake_case ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A__ : Optional[Any] =example["""content"""].count("""\n""" )
A__ : Dict =int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __lowerCamelCase ( __snake_case : int ) -> Any:
"""simple docstring"""
A__ : Tuple =["""def """, """class """, """for """, """while """]
A__ : List[str] =example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int=4 ) -> List[str]:
"""simple docstring"""
A__ : Any =example["""content"""].splitlines()
A__ : int =0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __lowerCamelCase ( __snake_case : Any ) -> Any:
"""simple docstring"""
A__ : int =tokenizer(example["""content"""], truncation=__snake_case )["""input_ids"""]
A__ : Dict =len(example["""content"""] ) / len(__snake_case )
return {"ratio": ratio}
def __lowerCamelCase ( __snake_case : int ) -> Any:
"""simple docstring"""
A__ : Any ={}
results.update(get_hash(__snake_case ) )
results.update(line_stats(__snake_case ) )
results.update(alpha_stats(__snake_case ) )
results.update(char_token_ratio(__snake_case ) )
results.update(is_autogenerated(__snake_case ) )
results.update(is_config_or_test(__snake_case ) )
results.update(has_no_keywords(__snake_case ) )
results.update(has_few_assignments(__snake_case ) )
return results
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Dict ) -> Dict:
"""simple docstring"""
if not check_uniques(__snake_case, __snake_case ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __lowerCamelCase ( __snake_case : Tuple ) -> str:
"""simple docstring"""
with open(__snake_case, """rb""" ) as f_in:
with gzip.open(str(__snake_case ) + """.gz""", """wb""", compresslevel=6 ) as f_out:
shutil.copyfileobj(__snake_case, __snake_case )
os.unlink(__snake_case )
# Settings
__snake_case : List[Any] = HfArgumentParser(PreprocessingArguments)
__snake_case : Union[str, Any] = parser.parse_args()
if args.num_workers is None:
__snake_case : Dict = multiprocessing.cpu_count()
__snake_case : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__snake_case : str = time.time()
__snake_case : Optional[int] = load_dataset(args.dataset_name, split='train')
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
__snake_case : Optional[int] = time.time()
__snake_case : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
__snake_case : str = set(ds.unique('hash'))
__snake_case : str = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
__snake_case : List[Any] = time.time()
__snake_case : Dict = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__snake_case : Union[str, Any] = time.time()
__snake_case , __snake_case : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
__snake_case : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
__snake_case : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
__snake_case : Optional[Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__snake_case : List[Any] = str(data_dir / F"""file-{file_number+1:012}.json""")
__snake_case : Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 687 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple:
'''simple docstring'''
A__ : Tuple =parent
A__ : Any =batch_size
A__ : List[str] =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_lengths
A__ : int =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : Optional[Any] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : List[Any] =causal
A__ : str =asm
A__ : Tuple =n_langs
A__ : Dict =vocab_size
A__ : Optional[Any] =n_special
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Optional[int] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : Any =num_labels
A__ : str =num_choices
A__ : Optional[int] =summary_type
A__ : int =use_proj
A__ : Tuple =scope
A__ : Union[str, Any] =bos_token_id
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Tuple =None
if self.use_input_lengths:
A__ : Tuple =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : Optional[Any] =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Any =None
A__ : Tuple =None
A__ : Optional[Any] =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float()
A__ : str =ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
A__ : List[Any] =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Tuple =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
A__ : Optional[Any] =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((A__) , ) : List[Any] =result_with_labels.to_tuple()
A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((A__) , ) : Tuple =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.num_choices
A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : Dict =self.prepare_config_and_inputs()
        (
            A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ ,
        ) : Optional[int] =config_and_inputs
A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__snake_case = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int:
'''simple docstring'''
A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Dict =XLMModelTester(self )
A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : Tuple =min_length + idx + 1
A__ : Tuple =min_length + idx + 1
A__ : Dict =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : str =min_length + idx + 1
A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , )
pass
@slow
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCAmelCase_ )
A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
A__ : Optional[Any] =[
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
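# Note on the expectation above (not part of the original file): greedy
# decoding from the two-token prompt "the president" loops on the same bigram,
# so the test pins that exact repeated sequence; the TODO comment flags the
# degenerate output rather than treating it as a model quality check.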
| 687 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Collection[float] | None = None ) -> None:
'''simple docstring'''
if components is None:
A__ : Union[str, Any] =[]
A__ : Dict =list(lowerCAmelCase_ )
def __len__( self : str ) -> int:
'''simple docstring'''
return len(self.__components )
def __str__( self : Tuple ) -> str:
'''simple docstring'''
return "(" + ",".join(map(lowerCAmelCase_ , self.__components ) ) + ")"
def __add__( self : Optional[Any] , lowerCAmelCase_ : Vector ) -> Vector:
'''simple docstring'''
A__ : Dict =len(self )
if size == len(lowerCAmelCase_ ):
A__ : int =[self.__components[i] + other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return Vector(lowerCAmelCase_ )
else:
raise Exception("""must have the same size""" )
def __sub__( self : str , lowerCAmelCase_ : Vector ) -> Vector:
'''simple docstring'''
A__ : Tuple =len(self )
if size == len(lowerCAmelCase_ ):
A__ : List[Any] =[self.__components[i] - other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return Vector(lowerCAmelCase_ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self : Union[str, Any] , lowerCAmelCase_ : float ) -> Vector:
'''simple docstring'''
...
@overload
def __mul__( self : int , lowerCAmelCase_ : Vector ) -> float:
'''simple docstring'''
...
def __mul__( self : int , lowerCAmelCase_ : float | Vector ) -> float | Vector:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , (float, int) ):
A__ : List[Any] =[c * other for c in self.__components]
return Vector(lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(self ) == len(lowerCAmelCase_ ):
A__ : List[Any] =len(self )
A__ : Union[str, Any] =[self.__components[i] * other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return sum(lowerCAmelCase_ )
else: # error case
raise Exception("""invalid operand!""" )
def lowercase__ ( self : List[str] ) -> Vector:
'''simple docstring'''
return Vector(self.__components )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def lowercase__ ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : float ) -> None:
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
A__ : Any =value
def lowercase__ ( self : Dict ) -> float:
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
A__ : List[Any] =[c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase_ ) )
def lowercase__ ( self : str , lowerCAmelCase_ : Vector , lowerCAmelCase_ : bool = False ) -> float:
'''simple docstring'''
A__ : int =self * other
A__ : int =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
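# Usage sketch for the vector class above (not part of the original file).
# Call sites in this file use the upstream name `Vector`, so the sketch does too:
# v1 = Vector([1.0, 2.0, 3.0])
# v2 = Vector([3.0, 2.0, 1.0])
# str(v1 + v2)             # "(4.0,4.0,4.0)"
# v1 * v2                  # 10.0 (dot product)
# v1.euclidean_length()    # sqrt(14) ~= 3.7417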
def __lowerCamelCase ( __snake_case : int ) -> Vector:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case )
return Vector([0] * dimension )
def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> Vector:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (isinstance(__snake_case, __snake_case ))
A__ : Any =[0] * dimension
A__ : Dict =1
return Vector(__snake_case )
def __lowerCamelCase ( __snake_case : float, __snake_case : Vector, __snake_case : Vector ) -> Vector:
"""simple docstring"""
assert (
isinstance(__snake_case, __snake_case )
and isinstance(__snake_case, __snake_case )
and (isinstance(__snake_case, (int, float) ))
)
return x * scalar + y
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : int ) -> Vector:
"""simple docstring"""
random.seed(__snake_case )
A__ : Dict =[random.randint(__snake_case, __snake_case ) for _ in range(__snake_case )]
return Vector(__snake_case )
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase_ : list[list[float]] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> None:
'''simple docstring'''
A__ : Optional[Any] =matrix
A__ : Union[str, Any] =w
A__ : Union[str, Any] =h
def __str__( self : Any ) -> str:
'''simple docstring'''
A__ : List[Any] =""""""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Dict , lowerCAmelCase_ : Matrix ) -> Matrix:
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
A__ : Optional[int] =[]
for i in range(self.__height ):
A__ : Dict =[
self.__matrix[i][j] + other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self : Union[str, Any] , lowerCAmelCase_ : Matrix ) -> Matrix:
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
A__ : List[str] =[]
for i in range(self.__height ):
A__ : str =[
self.__matrix[i][j] - other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self : Dict , lowerCAmelCase_ : float ) -> Matrix:
'''simple docstring'''
...
@overload
def __mul__( self : Any , lowerCAmelCase_ : Vector ) -> Vector:
'''simple docstring'''
...
def __mul__( self : List[str] , lowerCAmelCase_ : float | Vector ) -> Vector | Matrix:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # matrix-vector
if len(lowerCAmelCase_ ) == self.__width:
A__ : str =zero_vector(self.__height )
for i in range(self.__height ):
A__ : Tuple =[
self.__matrix[i][j] * other.component(lowerCAmelCase_ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase_ , sum(lowerCAmelCase_ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowerCAmelCase_ , (int, float) ): # matrix-scalar
A__ : Dict =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
return None
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
return self.__height
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
return self.__width
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : float ) -> None:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
A__ : Union[str, Any] =value
else:
raise Exception("""change_component: indices out of bounds""" )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
A__ : Dict =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase_ ) ):
A__ : Optional[int] =minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase_ , self.__width - 1 , self.__height - 1 ).determinant()
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase_ , lowerCAmelCase_ )
else:
raise Exception("""Indices out of bounds""" )
def lowercase__ ( self : str ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A__ : List[str] =[
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase_ ) for y in range(self.__width )
]
return sum(lowerCAmelCase_ )
def __lowerCamelCase ( __snake_case : int ) -> Matrix:
"""simple docstring"""
A__ : list[list[float]] =[[0] * n for _ in range(__snake_case )]
return Matrix(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : int, __snake_case : int ) -> Matrix:
"""simple docstring"""
random.seed(__snake_case )
A__ : list[list[float]] =[
[random.randint(__snake_case, __snake_case ) for _ in range(__snake_case )] for _ in range(__snake_case )
]
return Matrix(__snake_case, __snake_case, __snake_case )
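# Self-contained sketch of the Laplace expansion along the first row that the
# determinant method above implements (clean names; the toy values are
# illustrative and independent of the renamed API in this file):
def _det_sketch(m: list[list[float]]) -> float:
    if len(m) == 1:
        return m[0][0]
    total = 0.0
    for y in range(len(m)):
        # minor: drop row 0 and column y, then recurse with alternating signs
        minor = [row[:y] + row[y + 1 :] for row in m[1:]]
        total += (-1) ** y * m[0][y] * _det_sketch(minor)
    return total

assert _det_sketch([[1.0, 2.0], [3.0, 4.0]]) == -2.0
assert _det_sketch([[2.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 4.0]]) == 24.0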
| 687 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCamelCase ( __snake_case : int ) -> Optional[int]:
"""simple docstring"""
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Optional[Any] =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : List[str] =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A__ : int =True
if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None:
A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Union[str, Any] =kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Optional[Any] =kwargs["""min_value"""]
A__ : Any =list(lowerCAmelCase_ )
A__ : int =[p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs["""device"""] )
A__ : Optional[int] =None
A__ : Any =decay
A__ : List[Any] =min_decay
A__ : Optional[int] =update_after_step
A__ : List[str] =use_ema_warmup
A__ : str =inv_gamma
A__ : Union[str, Any] =power
A__ : str =0
A__ : str =None # set in `step()`
A__ : List[str] =model_cls
A__ : Optional[int] =model_config
@classmethod
def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel":
'''simple docstring'''
A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase_ )
return ema_model
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
A__ : Optional[int] =self.model_cls.from_config(self.model_config )
A__ : Optional[Any] =self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power
else:
A__ : Union[str, Any] =(1 + step) / (10 + step)
A__ : str =min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
A__ : int =max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Any =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : Optional[int] =parameters.parameters()
A__ : Dict =list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ : Any =self.get_decay(self.optimization_step )
A__ : Optional[int] =decay
A__ : List[str] =1 - decay
A__ : str =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None:
'''simple docstring'''
A__ : str =[
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def lowercase__ ( self : Optional[Any] ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : List[str] =[param.detach().cpu().clone() for param in parameters]
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ : List[str] =None
def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None:
'''simple docstring'''
A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ )
A__ : List[Any] =state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError("""Invalid min_decay""" )
A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError("""Invalid optimization_step""" )
A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError("""Invalid update_after_step""" )
A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
A__ : Tuple =state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ )
if shadow_params is not None:
A__ : List[str] =shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 687 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__snake_case : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__snake_case : Optional[Any] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
A__ : Dict =tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
A__ : Optional[Any] =self.diffusers_dir
shutil.copy(
os.path.join(lowerCAmelCase_ , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A__ : List[Any] ="""src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]=None ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
A__ : Optional[Any] =comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
A__ : int =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
A__ : Tuple =black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ )
A__ : Tuple =os.path.join(self.diffusers_dir , """new_code.py""" )
with open(lowerCAmelCase_ , """w""" , newline="""\n""" ) as f:
f.write(lowerCAmelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ )
with open(lowerCAmelCase_ , """r""" ) as f:
self.assertTrue(f.read() , lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
# Base copy consistency
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , lowerCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , lowerCAmelCase_ ) , )
# Copy consistency with a really long name
A__ : Optional[int] ="""TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" , f"{long_class_name}SchedulerOutput" , re.sub("""Bert""" , lowerCAmelCase_ , lowerCAmelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , lowerCAmelCase_ , overwrite_result=re.sub("""DDPM""" , """Test""" , lowerCAmelCase_ ) , )
| 687 |
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case : Union[str, Any] = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict:
"""simple docstring"""
A__ : Union[str, Any] =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}"
raise ValueError(__snake_case )
A__ : Tuple =requests.get(
f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
if response.status_code == 429:
raise requests.HTTPError
A__ : Tuple =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
A__ : Tuple ={}
for id_ in range(__snake_case ):
A__ : List[Any] ={
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 687 | 1 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
__snake_case : Optional[Any] = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def __lowerCamelCase ( __snake_case : str, __snake_case : str ) -> Tuple:
"""simple docstring"""
warnings.warn(__snake_case, __snake_case )
requires_backends(__snake_case, """sklearn""" )
return (preds == labels).mean()
def __lowerCamelCase ( __snake_case : str, __snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(__snake_case, __snake_case )
requires_backends(__snake_case, """sklearn""" )
A__ : int =simple_accuracy(__snake_case, __snake_case )
A__ : Dict =fa_score(y_true=__snake_case, y_pred=__snake_case )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(__snake_case, __snake_case )
requires_backends(__snake_case, """sklearn""" )
A__ : Union[str, Any] =pearsonr(__snake_case, __snake_case )[0]
A__ : Any =spearmanr(__snake_case, __snake_case )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Dict, __snake_case : List[Any] ) -> Any:
"""simple docstring"""
warnings.warn(__snake_case, __snake_case )
requires_backends(__snake_case, """sklearn""" )
assert len(__snake_case ) == len(__snake_case ), f"Predictions and labels have mismatched lengths {len(__snake_case )} and {len(__snake_case )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(__snake_case, __snake_case )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__snake_case, __snake_case )}
elif task_name == "mrpc":
return acc_and_fa(__snake_case, __snake_case )
elif task_name == "sts-b":
return pearson_and_spearman(__snake_case, __snake_case )
elif task_name == "qqp":
return acc_and_fa(__snake_case, __snake_case )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__snake_case, __snake_case )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__snake_case, __snake_case )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__snake_case, __snake_case )}
elif task_name == "rte":
return {"acc": simple_accuracy(__snake_case, __snake_case )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__snake_case, __snake_case )}
elif task_name == "hans":
return {"acc": simple_accuracy(__snake_case, __snake_case )}
else:
raise KeyError(__snake_case )
def __lowerCamelCase ( __snake_case : Dict, __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(__snake_case, __snake_case )
requires_backends(__snake_case, """sklearn""" )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(f"Predictions and labels have mismatched lengths {len(__snake_case )} and {len(__snake_case )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(__snake_case, __snake_case )}
else:
raise KeyError(__snake_case )
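# Self-contained sketch of the accuracy/F1 combination computed above, using
# the canonical sklearn name (`fa_score` above is presumably this dataset's
# rendering of sklearn.metrics.f1_score; the toy arrays are illustrative):
import numpy as np
from sklearn.metrics import f1_score

def _acc_and_fa_sketch() -> dict:
    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    acc = float((preds == labels).mean())  # 0.75 for this toy case
    fa = f1_score(y_true=labels, y_pred=preds)  # 0.8 for this toy case
    return {"acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2}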
| 687 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
A__ : Optional[Any] =parser.parse_args()
return args
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
if args.tpu_name:
A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__snake_case )
tf.tpu.experimental.initialize_tpu_system(__snake_case )
return tpu
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Any =0
for file in file_list:
A__ : Optional[int] =file.split("""/""" )[-1]
A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 )
A__ : str =int(__snake_case )
num_samples += sample_count
return num_samples
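# Shard-name convention assumed by the counting function above (hypothetical
# filename, illustrative only):
#   re.search(r"-\d+-(\d+)\.tfrecord", "wiki-00003-4096.tfrecord").group(1) == "4096"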
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] =count_samples(__snake_case )
A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case )
if shuffle:
A__ : Optional[int] =dataset.shuffle(len(__snake_case ) )
A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) )
A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case )
if shuffle:
assert shuffle_buffer_size is not None
A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size )
A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case )
A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case )
A__ : Tuple =dataset.prefetch(__snake_case )
return dataset
def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not args.no_tpu:
A__ : Dict =initialize_tpu(__snake_case )
A__ : int =tf.distribute.TPUStrategy(__snake_case )
else:
A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer )
A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
A__ : Optional[Any] =count_samples(__snake_case )
A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A__ : str =steps_per_epoch * args.num_epochs
with strategy.scope():
A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A__ , A__ : Optional[Any] =create_optimizer(
num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__snake_case, metrics=["""accuracy"""] )
def decode_fn(__snake_case : Tuple ):
A__ : Dict ={
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__snake_case, __snake_case )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A__ : List[Any] =DataCollatorForLanguageModeling(
tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" )
def mask_with_collator(__snake_case : Optional[int] ):
# TF really needs an isin() function
A__ : Union[str, Any] =(
~tf.cast(batch["""attention_mask"""], tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
A__ , A__ : List[str] =data_collator.tf_mask_tokens(
batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, )
return batch
A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, )
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, )
A__ : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) )
model.fit(
__snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__snake_case : str = parse_args()
main(args)
| 687 | 1 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =generator.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[str] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case : Union[str, Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
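# Lazy-import sketch (assumption: standard `_LazyModule` semantics): importing
# this package only registers the names above; the torch-backed
# `modeling_falcon` module is loaded on first attribute access, e.g. when
# `FalconModel` is actually referenced.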
| 687 | 1 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def __lowerCamelCase ( __snake_case : bytes ) -> bytes:
"""simple docstring"""
if len(__snake_case ) != 32:
raise ValueError("""Input must be of length 32""" )
A__ : Optional[Any] =b""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def __lowerCamelCase ( __snake_case : int ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
A__ : List[str] =format(__snake_case, """08x""" )[-8:]
A__ : Any =b""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def __lowerCamelCase ( __snake_case : bytes ) -> bytes:
"""simple docstring"""
A__ : Optional[int] =b""""""
for char in message:
bit_string += format(__snake_case, """08b""" ).encode("""utf-8""" )
A__ : Optional[int] =format(len(__snake_case ), """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__snake_case ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
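# Padding layout produced above, e.g. for a 24-bit ("abc") message:
#   24 message bits + "1" + 423 zero bits + 64-bit little-endian length = 512 bits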
def __lowerCamelCase ( __snake_case : bytes ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(__snake_case ) % 512 != 0:
raise ValueError("""Input must have length that's a multiple of 512""" )
for pos in range(0, len(__snake_case ), 512 ):
A__ : Optional[int] =bit_string[pos : pos + 512]
A__ : Union[str, Any] =[]
for i in range(0, 512, 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) )
yield block_words
def __lowerCamelCase ( __snake_case : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
A__ : List[str] =format(__snake_case, """032b""" )
A__ : Optional[int] =""""""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__snake_case, 2 )
def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __lowerCamelCase ( __snake_case : bytes ) -> bytes:
"""simple docstring"""
A__ : Dict =preprocess(__snake_case )
A__ : int =[int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
A__ : Union[str, Any] =0X67_45_23_01
A__ : int =0Xef_cd_ab_89
A__ : Dict =0X98_ba_dc_fe
A__ : Optional[int] =0X10_32_54_76
    A__ : Union[str, Any] =[
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__snake_case ):
A__ : Optional[int] =aa
A__ : Union[str, Any] =ba
A__ : Tuple =ca
A__ : Optional[int] =da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
A__ : Any =d ^ (b & (c ^ d))
A__ : Union[str, Any] =i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
A__ : Any =c ^ (d & (b ^ c))
A__ : List[str] =(5 * i + 1) % 16
elif i <= 47:
A__ : Any =b ^ c ^ d
A__ : Union[str, Any] =(3 * i + 5) % 16
else:
A__ : Optional[Any] =c ^ (b | not_aa(__snake_case ))
A__ : str =(7 * i) % 16
A__ : Any =(f + a + added_consts[i] + block_words[g]) % 2**32
A__ : Optional[int] =d
A__ : str =c
A__ : Tuple =b
A__ : int =sum_aa(__snake_case, left_rotate_aa(__snake_case, shift_amounts[i] ) )
# Add hashed chunk to running total
A__ : List[Any] =sum_aa(__snake_case, __snake_case )
A__ : List[str] =sum_aa(__snake_case, __snake_case )
A__ : Union[str, Any] =sum_aa(__snake_case, __snake_case )
A__ : str =sum_aa(__snake_case, __snake_case )
A__ : int =reformat_hex(__snake_case ) + reformat_hex(__snake_case ) + reformat_hex(__snake_case ) + reformat_hex(__snake_case )
return digest
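# Self-contained check of the 32-bit left rotation used in every round above
# (clean name; this rendering's renamed helpers shadow one another, so the
# sketch is deliberately standalone):
def _left_rotate_32_sketch(i: int, shift: int) -> int:
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32

assert _left_rotate_32_sketch(1, 31) == 0X80_00_00_00
assert _left_rotate_32_sketch(0X80_00_00_00, 1) == 1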
if __name__ == "__main__":
import doctest
doctest.testmod()
| 687 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__snake_case : int = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__snake_case : Optional[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__snake_case : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__snake_case : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 687 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'encoder-decoder'
__snake_case = True
def __init__( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
A__ : Tuple =kwargs.pop("""encoder""" )
A__ : Dict =encoder_config.pop("""model_type""" )
A__ : int =kwargs.pop("""decoder""" )
A__ : Optional[int] =decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
A__ : Optional[int] =AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Any =AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : List[Any] =True
@classmethod
def lowercase__ ( cls : List[str] , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Any ) -> PretrainedConfig:
'''simple docstring'''
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A__ : str =True
A__ : Union[str, Any] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCAmelCase_ )
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
A__ : List[str] =copy.deepcopy(self.__dict__ )
A__ : Optional[Any] =self.encoder.to_dict()
A__ : Union[str, Any] =self.decoder.to_dict()
A__ : str =self.__class__.model_type
return output
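# Minimal composition sketch using the real transformers classes this file
# mirrors (assumption: BertConfig is importable; sizes are illustrative):
#   from transformers import BertConfig, EncoderDecoderConfig
#   enc = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
#   dec = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention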
| 687 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str:
"""simple docstring"""
A__ : int =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any =""""""
else:
A__ : Optional[int] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Optional[int] =in_proj_weight[
: config.hidden_size, :
]
A__ : str =in_proj_bias[: config.hidden_size]
A__ : Optional[Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] =in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] =in_proj_bias[-config.hidden_size :]
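# Shape sketch for the slicing above (H = config.hidden_size):
#   in_proj_weight : (3*H, H) -> rows [0:H] query, [H:2*H] key, [2*H:3*H] value
#   in_proj_bias   : (3*H,)   -> split at the same boundaries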
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =dct.pop(__snake_case )
A__ : Tuple =val
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str:
"""simple docstring"""
A__ : Tuple =ViTConfig()
# patch_size
if model_name[-1] == "8":
A__ : Optional[Any] =8
# set labels if required
if not base_model:
A__ : Optional[Any] =1_000
A__ : str ="""huggingface/label-files"""
A__ : Any ="""imagenet-1k-id2label.json"""
A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : List[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A__ : str =384
A__ : Optional[Any] =1_536
A__ : Optional[Any] =12
A__ : Union[str, Any] =6
# load original model from torch hub
A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : List[str] =original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
if base_model:
A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval()
else:
A__ : List[str] =ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
A__ : Union[str, Any] =ViTImageProcessor()
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Union[str, Any] =encoding["""pixel_values"""]
A__ : Union[str, Any] =model(__snake_case )
if base_model:
A__ : List[str] =original_model(__snake_case )
assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
A__ : Optional[int] =original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__snake_case : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 687 | 1 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
__snake_case : Any = 'scheduler_config.json'
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 1
__snake_case = 2
__snake_case = 3
__snake_case = 4
__snake_case = 5
__snake_case = 6
__snake_case = 7
__snake_case = 8
__snake_case = 9
__snake_case = 10
__snake_case = 11
__snake_case = 12
__snake_case = 13
__snake_case = 14
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase :
'''simple docstring'''
__snake_case = SCHEDULER_CONFIG_NAME
__snake_case = []
__snake_case = True
@classmethod
def lowercase__ ( cls : str , lowerCAmelCase_ : Dict[str, Any] = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Tuple=False , **lowerCAmelCase_ : Tuple , ) -> Tuple:
'''simple docstring'''
A__ , A__ , A__ : Union[str, Any] =cls.load_config(
pretrained_model_name_or_path=lowerCAmelCase_ , subfolder=lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ , return_commit_hash=lowerCAmelCase_ , **lowerCAmelCase_ , )
return cls.from_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : int , lowerCAmelCase_ : Union[str, os.PathLike] , lowerCAmelCase_ : bool = False , **lowerCAmelCase_ : Dict ) -> Dict:
'''simple docstring'''
self.save_config(save_directory=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowercase__ ( cls : Dict ) -> Tuple:
'''simple docstring'''
A__ : List[str] =list(set([cls.__name__] + cls._compatibles ) )
A__ : List[str] =importlib.import_module(__name__.split(""".""" )[0] )
A__ : Optional[int] =[
getattr(lowerCAmelCase_ , lowerCAmelCase_ ) for c in compatible_classes_str if hasattr(lowerCAmelCase_ , lowerCAmelCase_ )
]
return compatible_classes
| 687 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(__snake_case : int, __snake_case : Dict ):
def rule_func(__snake_case : int ) -> float:
A__ : Any =sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =SchedulerType(__snake_case )
A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__snake_case, last_epoch=__snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, )
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
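# Self-contained check of the warmup + half-cosine shape implemented above
# (clean names; num_cycles=0.5 as in the cosine default):
def _cosine_lambda_sketch(step: int, warmup: int = 10, total: int = 100) -> float:
    if step < warmup:
        return step / max(1, warmup)
    progress = (step - warmup) / max(1, total - warmup)
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))

assert _cosine_lambda_sketch(0) == 0.0
assert abs(_cosine_lambda_sketch(10) - 1.0) < 1e-9
assert _cosine_lambda_sketch(100) < 1e-9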
| 687 | 1 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __lowerCamelCase ( __snake_case : Dataset, __snake_case : Dict[str, str] ) -> Any:
"""simple docstring"""
A__ : List[str] =args.log_outputs
A__ : Any ="""_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
# load metric
A__ : int =load_metric("""wer""" )
A__ : Any =load_metric("""cer""" )
# compute metrics
A__ : Union[str, Any] =wer.compute(references=result["""target"""], predictions=result["""prediction"""] )
A__ : int =cer.compute(references=result["""target"""], predictions=result["""prediction"""] )
# print & log results
A__ : Union[str, Any] =f"WER: {wer_result}\nCER: {cer_result}"
print(__snake_case )
with open(f"{dataset_id}_eval_results.txt", """w""" ) as f:
f.write(__snake_case )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
A__ : List[str] =f"log_{dataset_id}_predictions.txt"
A__ : List[Any] =f"log_{dataset_id}_targets.txt"
with open(__snake_case, """w""" ) as p, open(__snake_case, """w""" ) as t:
# mapping function to write output
def write_to_file(__snake_case : Optional[Any], __snake_case : Dict ):
p.write(f"{i}" + """\n""" )
p.write(batch["""prediction"""] + """\n""" )
t.write(f"{i}" + """\n""" )
t.write(batch["""target"""] + """\n""" )
result.map(__snake_case, with_indices=__snake_case )
def __lowerCamelCase ( __snake_case : str ) -> str:
"""simple docstring"""
A__ : int ="""[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
A__ : Any =re.sub(__snake_case, """""", text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
A__ : Dict =["""\n\n""", """\n""", """ """, """ """]
for t in token_sequences_to_ignore:
A__ : List[str] =""" """.join(text.split(__snake_case ) )
return text
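# Hedged illustration of the normalization above (input/output are made up):
#   "Hello, World!\n\nfoo  bar"  ->  "hello world foo bar"
# The regex strips the ignored punctuation, the text is lower-cased, and the
# newline / double-space sequences are collapsed to single spaces by the loop.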
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
A__ : Union[str, Any] =load_dataset(args.dataset, args.config, split=args.split, use_auth_token=__snake_case )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
A__ : str =AutoFeatureExtractor.from_pretrained(args.model_id )
A__ : List[Any] =feature_extractor.sampling_rate
# resample audio
A__ : Optional[int] =dataset.cast_column("""audio""", Audio(sampling_rate=__snake_case ) )
# load eval pipeline
if args.device is None:
A__ : List[str] =0 if torch.cuda.is_available() else -1
A__ : Tuple =pipeline("""automatic-speech-recognition""", model=args.model_id, device=args.device )
# map function to decode audio
def map_to_pred(__snake_case : int ):
A__ : Optional[Any] =asr(
batch["""audio"""]["""array"""], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s )
A__ : Optional[Any] =prediction["""text"""]
A__ : str =normalize_text(batch["""sentence"""] )
return batch
# run inference on all examples
A__ : Optional[int] =dataset.map(__snake_case, remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(__snake_case, __snake_case )
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
    '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to None (no chunking).'
)
parser.add_argument(
    '--stride_length_s', type=float, default=None, help='Stride of the audio chunks in seconds. Defaults to None.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
    help='The device to run the pipeline on. Defaults to GPU 0 if available, else CPU (-1).',
)
__snake_case : List[str] = parser.parse_args()
main(args)
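# Example invocation (illustrative; the script name and the model/dataset ids are
# placeholders for any CTC/ASR checkpoint and speech dataset on the Hub, but the
# flags match the argparse definitions above):
#   python eval.py \
#       --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en --split test --log_outputs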
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int ) -> str:
"""simple docstring"""
    if isinstance(__snake_case, float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if isinstance(__snake_case, str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
if num == 0:
return "0b0"
A__ : List[str] =False
if num < 0:
A__ : Any =True
A__ : str =-num
A__ : list[int] =[]
while num > 0:
binary.insert(0, num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(__snake_case ) for e in binary )
return "0b" + "".join(str(__snake_case ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Dict = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Dict = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
__snake_case : int = {
'gpt-neox-20b': 2048,
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str="<|endoftext|>" , lowerCAmelCase_ : str="<|endoftext|>" , lowerCAmelCase_ : Tuple="<|endoftext|>" , lowerCAmelCase_ : Optional[Any]=False , **lowerCAmelCase_ : Optional[Any] , ) -> Any:
'''simple docstring'''
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : List[str] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_ ) != add_prefix_space:
A__ : List[Any] =getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""" ) )
A__ : str =add_prefix_space
A__ : str =pre_tok_class(**lowerCAmelCase_ )
A__ : Optional[int] =add_prefix_space
def lowercase__ ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
A__ : Union[str, Any] =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : "Conversation" ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) + [self.eos_token_id] )
if len(lowerCAmelCase_ ) > self.model_max_length:
A__ : Optional[int] =input_ids[-self.model_max_length :]
return input_ids
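# Hedged usage sketch (not in the original file; upstream this class is
# transformers' GPTNeoXTokenizerFast):
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tokenizer("Hello world").input_ids
#     text = tokenizer.decode(ids)
# The conversation helper above appends eos after every turn and keeps only the
# last `model_max_length` tokens.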
| 687 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting"""
A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Optional[Any] =jax.random.PRNGKey(0 )
A__ : List[str] =50
A__ : List[str] =jax.device_count()
A__ : List[str] =num_samples * [prompt]
A__ : List[str] =num_samples * [init_image]
A__ : Tuple =num_samples * [mask_image]
A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# shard inputs and rng
A__ : Dict =replicate(lowerCAmelCase_ )
A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() )
A__ : List[Any] =shard(lowerCAmelCase_ )
A__ : Union[str, Any] =shard(lowerCAmelCase_ )
A__ : str =shard(lowerCAmelCase_ )
A__ : List[str] =pipeline(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ )
A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 )
A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1]
A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ : Optional[int] =jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 687 | 1 |
'''simple docstring'''
import torch
from torch import nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A__ : int =n_token
A__ : str =d_embed
A__ : Dict =d_proj
A__ : int =cutoffs + [n_token]
A__ : Optional[Any] =[0] + self.cutoffs
A__ : Tuple =div_val
A__ : int =self.cutoffs[0]
A__ : Union[str, Any] =len(self.cutoffs ) - 1
A__ : str =self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
A__ : Dict =nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
A__ : Union[str, Any] =nn.Parameter(torch.zeros(self.n_clusters ) )
A__ : int =nn.ModuleList()
A__ : Dict =nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
else:
self.out_projs.append(lowerCAmelCase_ )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
A__ , A__ : Union[str, Any] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ : List[Any] =d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , r_idx - l_idx ) )
A__ : int =keep_order
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
if proj is None:
A__ : Optional[Any] =nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
A__ : List[str] =nn.functional.linear(lowerCAmelCase_ , proj.t().contiguous() )
A__ : str =nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Tuple=False ) -> Tuple:
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
A__ : Optional[Any] =hidden[..., :-1, :].contiguous()
A__ : Optional[int] =labels[..., 1:].contiguous()
A__ : Optional[int] =hidden.view(-1 , hidden.size(-1 ) )
A__ : Optional[int] =labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
A__ : Optional[int] =hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
A__ : Tuple =self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
A__ : Union[str, Any] =labels != -1_00
A__ : str =torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
A__ : Optional[int] =(
-nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
A__ : List[str] =nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
A__ , A__ : List[str] =[], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
A__ , A__ : List[str] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ : Optional[int] =self.out_layers[0].weight[l_idx:r_idx]
A__ : Dict =self.out_layers[0].bias[l_idx:r_idx]
else:
A__ : Optional[Any] =self.out_layers[i].weight
A__ : Dict =self.out_layers[i].bias
if i == 0:
A__ : Union[str, Any] =torch.cat([weight_i, self.cluster_weight] , dim=0 )
A__ : List[Any] =torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
A__ , A__ , A__ : List[Any] =weights[0], biases[0], self.out_projs[0]
A__ : Any =self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
if labels is None:
A__ : Optional[Any] =hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
A__ : Optional[int] =torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
A__ : List[str] =0
A__ : List[Any] =[0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
A__ , A__ : List[Any] =cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
A__ : Union[str, Any] =(labels >= l_idx) & (labels < r_idx)
A__ : Tuple =mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
A__ : Union[str, Any] =labels.index_select(0 , lowerCAmelCase_ ) - l_idx
A__ : List[Any] =head_logprob.index_select(0 , lowerCAmelCase_ )
A__ : List[str] =hidden.index_select(0 , lowerCAmelCase_ )
else:
A__ : Optional[int] =hidden
if i == 0:
if labels is not None:
A__ : List[Any] =head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
A__ : Optional[int] =head_logprob[:, : self.cutoffs[0]]
else:
A__ , A__ , A__ : List[str] =weights[i], biases[i], self.out_projs[i]
A__ : int =self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
A__ : str =self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
A__ : Optional[Any] =head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
A__ : Optional[Any] =head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
A__ : int =logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters == 0:
A__ : int =self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
A__ , A__ : Optional[int] =[], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
A__ , A__ : Optional[Any] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ : Optional[int] =self.out_layers[0].weight[l_idx:r_idx]
A__ : Any =self.out_layers[0].bias[l_idx:r_idx]
else:
A__ : List[str] =self.out_layers[i].weight
A__ : int =self.out_layers[i].bias
if i == 0:
A__ : List[str] =torch.cat([weight_i, self.cluster_weight] , dim=0 )
A__ : Union[str, Any] =torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
A__ , A__ , A__ : Union[str, Any] =weights[0], biases[0], self.out_projs[0]
A__ : Optional[int] =self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Union[str, Any] =hidden.new_empty((head_logit.size(0 ), self.n_token) )
A__ : List[Any] =nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
A__ : Optional[Any] =[0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
A__ , A__ : Optional[int] =cutoff_values[i], cutoff_values[i + 1]
if i == 0:
A__ : Dict =head_logprob[:, : self.cutoffs[0]]
else:
A__ , A__ , A__ : int =weights[i], biases[i], self.out_projs[i]
A__ : Optional[int] =self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Tuple =nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
A__ : List[str] =head_logprob[:, -i] + tail_logprob_i
A__ : str =logprob_i
return out
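# Hedged construction sketch (not in the original file; upstream this class is
# transformers' ProjectedAdaptiveLogSoftmax from the Transformer-XL code, and the
# sizes below are illustrative):
#     crit = ProjectedAdaptiveLogSoftmax(
#         n_token=267_735, d_embed=1_024, d_proj=1_024,
#         cutoffs=[20_000, 40_000, 200_000], div_val=4,
#     )
#     # forward(hidden, labels) returns per-token negative log-likelihoods;
#     # log_prob(hidden) returns full [batch, n_token] log-probabilities.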
| 687 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'conditional_detr'
__snake_case = ['past_key_values']
__snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Tuple =backbone_config.get("""model_type""" )
A__ : List[str] =CONFIG_MAPPING[backbone_model_type]
A__ : Dict =config_class.from_dict(lowerCAmelCase_ )
A__ : int =use_timm_backbone
A__ : List[Any] =backbone_config
A__ : Optional[int] =num_channels
A__ : Optional[int] =num_queries
A__ : Union[str, Any] =d_model
A__ : Optional[int] =encoder_ffn_dim
A__ : Optional[Any] =encoder_layers
A__ : int =encoder_attention_heads
A__ : Optional[Any] =decoder_ffn_dim
A__ : Tuple =decoder_layers
A__ : Optional[Any] =decoder_attention_heads
A__ : Tuple =dropout
A__ : int =attention_dropout
A__ : Dict =activation_dropout
A__ : Union[str, Any] =activation_function
A__ : List[str] =init_std
A__ : str =init_xavier_std
A__ : int =encoder_layerdrop
A__ : List[Any] =decoder_layerdrop
A__ : Tuple =encoder_layers
A__ : Tuple =auxiliary_loss
A__ : List[Any] =position_embedding_type
A__ : int =backbone
A__ : Optional[int] =use_pretrained_backbone
A__ : str =dilation
# Hungarian matcher
A__ : Any =class_cost
A__ : str =bbox_cost
A__ : str =giou_cost
# Loss coefficients
A__ : Union[str, Any] =mask_loss_coefficient
A__ : int =dice_loss_coefficient
A__ : Union[str, Any] =cls_loss_coefficient
A__ : List[str] =bbox_loss_coefficient
A__ : str =giou_loss_coefficient
A__ : Optional[Any] =focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return self.d_model
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : str =self.backbone_config.to_dict()
A__ : int =self.__class__.model_type
return output
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = version.parse('1.11' )
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return 12
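# Hedged usage sketch (not in the original file; upstream these classes are
# transformers' ConditionalDetrConfig / ConditionalDetrOnnxConfig):
#     config = ConditionalDetrConfig()   # resnet50 backbone, 300 queries, d_model=256
#     config.hidden_size                 # -> 256, aliased to d_model
#     config.num_attention_heads         # -> 8, aliased to encoder_attention_heads
# The ONNX config above exports pixel_values/pixel_mask with dynamic batch axes,
# validates at atol 1e-5, requires torch >= 1.11 and uses ONNX opset 12.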
| 687 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCamelCase ( __snake_case : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(__snake_case ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__snake_case : Union[str, Any] = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def __lowerCamelCase ( __snake_case : int ) -> list[int]:
"""simple docstring"""
    if not isinstance(__snake_case, int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be > 0""" )
A__ : str =[]
    for num in range(len(odd_composites ) ):
        A__ : List[Any] =0
        while 2 * i * i <= odd_composites[num]:
            A__ : List[str] =odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
return list_nums
return []
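# Hedged worked example (not in the original file): for 9, the first odd composite,
# the loop finds 9 = 7 + 2 * 1**2 with 7 prime, so the while-loop breaks and 9 is
# not collected; only odd composites with no decomposition p + 2*i**2 survive,
# which is exactly the counterexample Project Euler 46 (Goldbach's other
# conjecture) asks for.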
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 687 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 'bit'
__snake_case = ['preactivation', 'bottleneck']
__snake_case = ['SAME', 'VALID']
def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ : List[Any] =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
A__ : List[Any] =num_channels
A__ : Tuple =embedding_size
A__ : Union[str, Any] =hidden_sizes
A__ : List[str] =depths
A__ : Optional[Any] =layer_type
A__ : int =hidden_act
A__ : int =global_padding
A__ : int =num_groups
A__ : str =drop_path_rate
A__ : str =embedding_dynamic_padding
A__ : Dict =output_stride
A__ : Optional[int] =width_factor
A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
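# Hedged usage sketch (not in the original file; upstream this class is
# transformers' BitConfig, and the values shown follow from its defaults):
#     config = BitConfig()    # depths [3, 4, 6, 3], hidden sizes [256, 512, 1024, 2048]
#     config.stage_names      # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     BitConfig(layer_type="bottleneck")   # must be 'preactivation' or 'bottleneck'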
| 687 | 1 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
super().__init__()
A__ : Union[str, Any] =module
A__ : Union[str, Any] =nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , )
A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ )
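# The adapter above is a rank-`rank` LoRA bottleneck (in_features -> rank ->
# out_features) whose second projection is zero-initialized, so the wrapped
# module's output is unchanged at step 0. Hedged wrapping sketch (module path is
# hypothetical), mirroring the training test further below:
#     model.q_proj = LoRALayer(model.q_proj, rank=16)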
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'bigscience/bloom-1b7'
# Constant values
__snake_case = 2.109659552692574
__snake_case = 'Hello my name is'
__snake_case = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__snake_case = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt2-xl'
__snake_case = 3.3191854854152187
| 687 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__snake_case : List[str] = 5_0003
__snake_case : Dict = 5_0002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PLBartTokenizer
__snake_case = None
__snake_case = False
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )]
self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ )
A__ : Dict =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Tuple =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'uclanlp/plbart-python-en_XX'
__snake_case = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__snake_case = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__snake_case = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : Optional[int] ) -> str:
'''simple docstring'''
A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A__ : Optional[Any] =1
return cls
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
A__ : str =10
A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ : Tuple =tempfile.mkdtemp()
A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
A__ : Optional[int] =self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[Any] =targets["""input_ids"""]
A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
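# Hedged usage sketch distilled from the tests above (PLBartTokenizer with the
# "base" language codes; variable names are illustrative):
#     tok = PLBartTokenizer.from_pretrained(
#         "uclanlp/plbart-python-en_XX", language_codes="base",
#         src_lang="python", tgt_lang="en_XX")
#     batch = tok(src_code, text_target=docstrings, return_tensors="pt")
#     # source ids end with [eos, __python__]; labels end with [eos, __en_XX__]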
| 687 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : str = logging.get_logger(__name__)
__snake_case : Tuple = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'time_series_transformer'
__snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : str = "student_t" , lowerCAmelCase_ : str = "nll" , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase_ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : int = 64 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 1_00 , lowerCAmelCase_ : float = 0.02 , lowerCAmelCase_ : Dict=True , **lowerCAmelCase_ : str , ) -> Union[str, Any]:
'''simple docstring'''
# time series specific configuration
A__ : Any =prediction_length
A__ : Any =context_length or prediction_length
A__ : Dict =distribution_output
A__ : str =loss
A__ : int =input_size
A__ : Optional[int] =num_time_features
A__ : Optional[int] =lags_sequence
A__ : str =scaling
A__ : Dict =num_dynamic_real_features
A__ : Tuple =num_static_real_features
A__ : List[Any] =num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
A__ : Any =cardinality
else:
A__ : Optional[int] =[0]
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
A__ : Optional[int] =embedding_dimension
else:
A__ : int =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A__ : List[str] =num_parallel_samples
# Transformer architecture configuration
A__ : int =input_size * len(lowerCAmelCase_ ) + self._number_of_features
A__ : List[Any] =d_model
A__ : int =encoder_attention_heads
A__ : int =decoder_attention_heads
A__ : Optional[int] =encoder_ffn_dim
A__ : List[Any] =decoder_ffn_dim
A__ : int =encoder_layers
A__ : List[Any] =decoder_layers
A__ : int =dropout
A__ : Optional[Any] =attention_dropout
A__ : int =activation_dropout
A__ : List[Any] =encoder_layerdrop
A__ : List[str] =decoder_layerdrop
A__ : Optional[Any] =activation_function
A__ : str =init_std
A__ : Dict =use_cache
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
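# --- Added illustration (not part of the original config file). Reproduces the
# `_number_of_features` arithmetic above with plain numbers; the concrete
# values are assumptions chosen for the example.
embedding_dimension_demo = [2, 3]   # one entry per static categorical feature
n_features_demo = (
    sum(embedding_dimension_demo)   # embedded categorical features
    + 0                             # num_dynamic_real_features
    + 2                             # num_time_features
    + 1                             # num_static_real_features
    + 1 * 2                         # input_size * 2: log1p(abs(loc)) and log(scale)
)
assert n_features_demo == 10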
| 687 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =generator.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[str] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
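# --- Added illustration (not part of the original test file). The determinism
# pattern the tests above rely on: identically seeded generators produce
# identical samples, so a save/reload round trip can be compared numerically.
_g1 = torch.Generator().manual_seed(0)
_g2 = torch.Generator().manual_seed(0)
_a = torch.randn(2, 3, generator=_g1).numpy()
_b = torch.randn(2, 3, generator=_g2).numpy()
assert np.abs(_a - _b).sum() < 1e-5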
| 687 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'wavlm'
def __init__( self : List[Any] , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : Any=7_68 , lowerCAmelCase_ : List[str]=12 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : str=30_72 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1e-5 , lowerCAmelCase_ : Optional[Any]="group" , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : Any=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase_ : List[str]=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Union[str, Any]=1_28 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Tuple=3_20 , lowerCAmelCase_ : Dict=8_00 , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=0.05 , lowerCAmelCase_ : Union[str, Any]=10 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : str=10 , lowerCAmelCase_ : str=3_20 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Union[str, Any]=1_00 , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : Optional[int]=2_56 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Union[str, Any]="mean" , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Tuple=2_56 , lowerCAmelCase_ : List[str]=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase_ : List[str]=(5, 3, 3, 1, 1) , lowerCAmelCase_ : List[Any]=(1, 2, 3, 1, 1) , lowerCAmelCase_ : Dict=5_12 , lowerCAmelCase_ : Optional[Any]=80 , lowerCAmelCase_ : Optional[Any]=0 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Dict=None , **lowerCAmelCase_ : Union[str, Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ )
A__ : List[Any] =hidden_size
A__ : Any =feat_extract_norm
A__ : Any =feat_extract_activation
A__ : List[Any] =list(lowerCAmelCase_ )
A__ : List[Any] =list(lowerCAmelCase_ )
A__ : Any =list(lowerCAmelCase_ )
A__ : Union[str, Any] =conv_bias
A__ : Tuple =num_buckets
A__ : str =max_bucket_distance
A__ : Union[str, Any] =num_conv_pos_embeddings
A__ : Tuple =num_conv_pos_embedding_groups
        self.num_feat_extract_layers =len(self.conv_dim )
A__ : Dict =num_hidden_layers
A__ : List[str] =intermediate_size
A__ : List[str] =hidden_act
A__ : List[Any] =num_attention_heads
A__ : Union[str, Any] =hidden_dropout
A__ : Union[str, Any] =attention_dropout
A__ : Optional[int] =activation_dropout
A__ : List[Any] =feat_proj_dropout
A__ : Any =final_dropout
A__ : str =layerdrop
A__ : int =layer_norm_eps
A__ : Dict =initializer_range
A__ : Any =num_ctc_classes
A__ : Optional[int] =vocab_size
A__ : Union[str, Any] =do_stable_layer_norm
A__ : List[Any] =use_weighted_layer_sum
A__ : int =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ : Union[str, Any] =apply_spec_augment
A__ : Union[str, Any] =mask_time_prob
A__ : Optional[int] =mask_time_length
A__ : Tuple =mask_time_min_masks
A__ : int =mask_feature_prob
A__ : Optional[Any] =mask_feature_length
# parameters for pretraining with codevector quantized representations
A__ : Tuple =num_codevectors_per_group
A__ : Any =num_codevector_groups
A__ : Tuple =contrastive_logits_temperature
A__ : List[str] =num_negatives
A__ : Optional[int] =codevector_dim
A__ : Optional[Any] =proj_codevector_dim
A__ : Union[str, Any] =diversity_loss_weight
# ctc loss
A__ : Optional[int] =ctc_loss_reduction
A__ : Any =ctc_zero_infinity
# adapter
A__ : Optional[int] =add_adapter
A__ : Optional[int] =adapter_kernel_size
A__ : Dict =adapter_stride
A__ : Optional[Any] =num_adapter_layers
A__ : str =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A__ : Dict =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A__ : List[Any] =list(lowerCAmelCase_ )
A__ : Tuple =list(lowerCAmelCase_ )
A__ : Optional[Any] =list(lowerCAmelCase_ )
A__ : List[Any] =xvector_output_dim
@property
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
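# --- Added illustration (not part of the original config file). The property
# above multiplies the conv strides together, i.e. the waveform-to-frame
# downsampling factor; with the default strides a 16 kHz signal is reduced by
# 320 samples per frame (~20 ms hop).
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320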
| 687 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    '''simple docstring'''
    latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 32 , num_vq_embeddings: int = 256 , norm_num_groups: int = 32 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.18215 , norm_type: str = "group" , ) -> None:
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder =Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim =vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv =nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize =VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv =nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder =Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> VQEncoderOutput:
        '''simple docstring'''
        h =self.encoder(x )
        h =self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode( self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info =self.quantize(h )
        else:
            quant =h
        quant2 =self.post_quant_conv(quant )
        dec =self.decoder(quant2 , quant if self.config.norm_type == "spatial" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        x =sample
        h =self.encode(x ).latents
        dec =self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
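# --- Added illustration (not part of the original file). A hedged sketch of
# the core of a vector quantizer (an assumption about what VectorQuantizer
# does, not the diffusers implementation): nearest-neighbour lookup into a
# learned codebook.
_codebook = torch.randn(256, 32)       # n_embed x vq_embed_dim
_latents = torch.randn(4, 32)          # flattened encoder output
_indices = torch.cdist(_latents, _codebook).argmin(dim=1)   # closest code per latent
_quantized = _codebook[_indices]       # (4, 32); the straight-through estimator is a training detail
assert _quantized.shape == (4, 32)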
| 687 | 1 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__snake_case : str = logging.get_logger(__name__)
__snake_case : str = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__snake_case : str = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively( hf_pointer, key, value, full_name, weight_type ):
    """simple docstring"""
    for attribute in key.split("." ):
        hf_pointer =getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape =getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape =hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data =value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data =value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data =value
    elif weight_type == "bias":
        hf_pointer.bias.data =value
    else:
        hf_pointer.data =value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model, hf_model ):
    """simple docstring"""
    unused_weights =[]
    fairseq_dict =fairseq_model.state_dict()
    feature_extractor =hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used =False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used =True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used =True
                    if "*" in mapped_key:
                        layer_index =name.split(key )[0].split("." )[-2]
                        mapped_key =mapped_key.replace("*", layer_index )
                    if "weight_g" in name:
                        weight_type ="weight_g"
                    elif "weight_v" in name:
                        weight_type ="weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type ="bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type ="weight"
                    else:
                        weight_type =None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name, value, feature_extractor, unused_weights, use_group_norm ):
    """simple docstring"""
    name =full_name.split("conv_layers." )[-1]
    items =name.split("." )
    layer_id =int(items[0] )
    type_id =int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data =value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data =value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data =value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data =value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None ):
    """simple docstring"""
    checkpoint =torch.load(checkpoint_path )
    cfg =WavLMConfigOrig(checkpoint["cfg"] )
    model =WavLMOrig(cfg )
    model.load_state_dict(checkpoint["model"] )
    model.eval()
    if config_path is not None:
        config =WavLMConfig.from_pretrained(config_path )
    else:
        config =WavLMConfig()
    hf_wavlm =WavLMModel(config )
    recursively_load_weights(model, hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__snake_case : List[str] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
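# --- Added illustration (not part of the original script). A toy walk-through
# of the MAPPING rename scheme above: "*" stands for the encoder layer index,
# which is recovered from the characters preceding the matched key.
_demo_name = "encoder.layers.3.self_attn.k_proj.weight"
_demo_key, _demo_mapped = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
_layer_index = _demo_name.split(_demo_key)[0].split(".")[-2]
assert _demo_mapped.replace("*", _layer_index) == "encoder.layers.3.attention.k_proj"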
| 687 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Tuple = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__snake_case : str = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__snake_case : List[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def get_pairs( word ) -> set:
    """simple docstring"""
    pairs =set()
    prev_char =word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char =char
    pairs =set(pairs )
    return pairs
class PhobertTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file =vocab_file
        self.merges_file =merges_file
        self.encoder ={}
        self.encoder[bos_token] =0
        self.encoder[pad_token] =1
        self.encoder[eos_token] =2
        self.encoder[unk_token] =3
        self.add_from_file(vocab_file )
        self.decoder ={v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges =merges_handle.read().split("\n" )[:-1]
        merges =[tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks =dict(zip(merges , range(len(merges ) ) ) )
        self.cache ={}
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls =[self.cls_token_id]
        sep =[self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word =tuple(token )
        word =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        pairs =get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram =min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second =bigram
            new_word =[]
            i =0
            while i < len(word ):
                try:
                    j =word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i =j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word =tuple(new_word )
            word =new_word
            if len(word ) == 1:
                break
            else:
                pairs =get_pairs(word )
        word ="@@ ".join(word )
        word =word[:-4]
        self.cache[token] =word
        return word
    def _tokenize( self , text ):
        '''simple docstring'''
        split_tokens =[]
        words =re.findall(R"\S+\n?" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string =" ".join(tokens ).replace("@@ " , "" ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file =os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_merge_file =os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file( self , f ) -> None:
        '''simple docstring'''
        if isinstance(f , str ):
            try:
                with open(f , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
            return
        lines =f.readlines()
        for lineTmp in lines:
            line =lineTmp.strip()
            idx =line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            word =line[:idx]
            self.encoder[word] =len(self.encoder )
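# --- Added illustration (not part of the original tokenizer file). One BPE
# merge step on a toy word, mirroring the `bpe` loop above: the lowest-ranked
# adjacent pair is merged first.
_word = ("l", "o", "w</w>")
_pairs = get_pairs(_word)
_ranks = {("l", "o"): 0}          # a one-rule toy merge table (assumption)
_best = min(_pairs, key=lambda pair: _ranks.get(pair, float("inf")))
assert _best == ("l", "o")        # -> merged token "lo", leaving ("lo", "w</w>")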
| 687 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , lowerCAmelCase_ : int = 6_55_36 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : str = "fourier" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase_ : Tuple[str] = "UNetMidBlock1D" , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Tuple[int] = (32, 32, 64) , lowerCAmelCase_ : str = None , lowerCAmelCase_ : int = 8 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : bool = False , ) -> Tuple:
'''simple docstring'''
super().__init__()
A__ : Union[str, Any] =sample_size
# time
if time_embedding_type == "fourier":
A__ : Any =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCAmelCase_ , log=lowerCAmelCase_ , flip_sin_to_cos=lowerCAmelCase_ )
A__ : Any =2 * block_out_channels[0]
elif time_embedding_type == "positional":
A__ : Tuple =Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase_ , downscale_freq_shift=lowerCAmelCase_ )
A__ : List[Any] =block_out_channels[0]
if use_timestep_embedding:
A__ : int =block_out_channels[0] * 4
A__ : int =TimestepEmbedding(
in_channels=lowerCAmelCase_ , time_embed_dim=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , out_dim=block_out_channels[0] , )
A__ : List[Any] =nn.ModuleList([] )
A__ : List[Any] =None
A__ : List[Any] =nn.ModuleList([] )
A__ : int =None
# down
A__ : Tuple =in_channels
for i, down_block_type in enumerate(lowerCAmelCase_ ):
A__ : Optional[Any] =output_channel
A__ : Optional[int] =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
A__ : Optional[int] =i == len(lowerCAmelCase_ ) - 1
A__ : Optional[int] =get_down_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
A__ : Tuple =get_mid_block(
lowerCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase_ , add_downsample=lowerCAmelCase_ , )
# up
A__ : int =list(reversed(lowerCAmelCase_ ) )
A__ : Optional[Any] =reversed_block_out_channels[0]
if out_block_type is None:
A__ : Optional[Any] =out_channels
else:
A__ : Tuple =block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
A__ : List[str] =output_channel
A__ : List[str] =(
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase_ ) - 1 else final_upsample_channels
)
A__ : Optional[int] =i == len(lowerCAmelCase_ ) - 1
A__ : List[Any] =get_up_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCAmelCase_ )
A__ : int =output_channel
# out
A__ : Tuple =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
A__ : str =get_out_block(
out_block_type=lowerCAmelCase_ , num_groups_out=lowerCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Union[torch.Tensor, float, int] , lowerCAmelCase_ : bool = True , ) -> Union[UNetaDOutput, Tuple]:
'''simple docstring'''
A__ : Any =timestep
if not torch.is_tensor(lowerCAmelCase_ ):
A__ : List[Any] =torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCAmelCase_ ) and len(timesteps.shape ) == 0:
A__ : int =timesteps[None].to(sample.device )
A__ : List[str] =self.time_proj(lowerCAmelCase_ )
if self.config.use_timestep_embedding:
A__ : Union[str, Any] =self.time_mlp(lowerCAmelCase_ )
else:
A__ : Any =timestep_embed[..., None]
A__ : int =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
A__ : str =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
A__ : Dict =()
for downsample_block in self.down_blocks:
A__ , A__ : List[Any] =downsample_block(hidden_states=lowerCAmelCase_ , temb=lowerCAmelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
A__ : int =self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
A__ : int =down_block_res_samples[-1:]
A__ : Any =down_block_res_samples[:-1]
A__ : List[str] =upsample_block(lowerCAmelCase_ , res_hidden_states_tuple=lowerCAmelCase_ , temb=lowerCAmelCase_ )
# 5. post-process
if self.out_block:
A__ : Any =self.out_block(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCAmelCase_ )
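# --- Added illustration (not part of the original model file). The timestep
# promotion pattern used in the forward pass above: a bare Python int is
# wrapped into a 1-D long tensor so downstream embedding layers see a batch
# dimension. Sizes are arbitrary example values.
_sample = torch.randn(2, 14, 16)   # (batch, channels, length)
_timestep = 3
if not torch.is_tensor(_timestep):
    _timestep = torch.tensor([_timestep], dtype=torch.long, device=_sample.device)
assert _timestep.shape == (1,)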
| 687 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
def cosine_distance( image_embeds, text_embeds ) -> torch.Tensor:
    """simple docstring"""
    normalized_image_embeds =nn.functional.normalize(image_embeds )
    normalized_text_embeds =nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t() )
class StableDiffusionSafetyChecker( PreTrainedModel ):
    '''simple docstring'''
    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']
    def __init__( self , config: CLIPConfig ):
        '''simple docstring'''
        super().__init__(config )
        self.vision_model =CLIPVisionModel(config.vision_config )
        self.visual_projection =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=False )
        self.concept_embeds =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=False )
        self.special_care_embeds =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=False )
        self.concept_embeds_weights =nn.Parameter(torch.ones(17 ) , requires_grad=False )
        self.special_care_embeds_weights =nn.Parameter(torch.ones(3 ) , requires_grad=False )
    @torch.no_grad()
    def forward( self , clip_input , images ):
        '''simple docstring'''
        pooled_output =self.vision_model(clip_input )[1]  # pooled_output
        image_embeds =self.visual_projection(pooled_output )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist =cosine_distance(image_embeds , self.special_care_embeds ).cpu().float().numpy()
        cos_dist =cosine_distance(image_embeds , self.concept_embeds ).cpu().float().numpy()
        result =[]
        batch_size =image_embeds.shape[0]
        for i in range(batch_size ):
            result_img ={"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment =0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                concept_cos =special_cos_dist[i][concept_idx]
                concept_threshold =self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] =round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    # keep the triggering index paired with its score (a dict, not a set,
                    # so the mapping is preserved)
                    result_img["special_care"].append({concept_idx: result_img["special_scores"][concept_idx]} )
                    adjustment =0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                concept_cos =cos_dist[i][concept_idx]
                concept_threshold =self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] =round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx )
            result.append(result_img )
        has_nsfw_concepts =[len(res["bad_concepts"] ) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx( self , clip_input: torch.FloatTensor , images: torch.FloatTensor ):
        '''simple docstring'''
        pooled_output =self.vision_model(clip_input )[1]  # pooled_output
        image_embeds =self.visual_projection(pooled_output )
        special_cos_dist =cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist =cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment =0.0
        special_scores =special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care =torch.any(special_scores > 0 , dim=1 )
        special_adjustment =special_care * 0.01
        special_adjustment =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        concept_scores =(cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts =torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
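# --- Added illustration (not part of the original file). `cosine_distance`
# above is really a cosine-similarity matrix: row-normalised embeddings
# followed by a matmul, so every entry lies in [-1, 1].
_img = torch.randn(2, 8)
_txt = torch.randn(3, 8)
_sims = torch.mm(nn.functional.normalize(_img), nn.functional.normalize(_txt).t())
assert _sims.shape == (2, 3) and float(_sims.abs().max()) <= 1.0 + 1e-6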
| 687 | 1 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : List[str] ) -> None:
'''simple docstring'''
A__ : Tuple =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
A__ : Optional[Any] =Vector()
def lowercase__ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
A__ : Union[str, Any] =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(lowerCAmelCase_ ) , """(0,0,0,0,0,1)""" )
def lowercase__ ( self : int ) -> None:
'''simple docstring'''
A__ : Optional[int] =Vector([1, 2, 3, 4] )
self.assertEqual(len(lowerCAmelCase_ ) , 4 )
def lowercase__ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
A__ : Any =Vector([1, 2] )
A__ : Tuple =Vector([1, 2, 3, 4, 5] )
A__ : Union[str, Any] =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
A__ : Any =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def lowercase__ ( self : List[Any] ) -> None:
'''simple docstring'''
A__ : str =Vector([1, 2, 3] )
A__ : Union[str, Any] =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowercase__ ( self : Optional[Any] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =Vector([1, 2, 3] )
A__ : List[str] =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowercase__ ( self : Optional[Any] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =Vector([1, 2, 3] )
A__ : int =Vector([2, -1, 4] ) # for test of dot product
A__ : List[str] =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def lowercase__ ( self : Dict ) -> None:
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def lowercase__ ( self : Tuple ) -> None:
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def lowercase__ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
A__ : Any =Vector([1, 2, 3] )
A__ : List[Any] =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , lowerCAmelCase_ , lowerCAmelCase_ ) ) , """(3,4,7)""" )
def lowercase__ ( self : int ) -> None:
'''simple docstring'''
A__ : Tuple =Vector([1, 0, 0, 0, 0, 0] )
A__ : Optional[Any] =x.copy()
self.assertEqual(str(lowerCAmelCase_ ) , str(lowerCAmelCase_ ) )
def lowercase__ ( self : Dict ) -> None:
'''simple docstring'''
A__ : int =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(lowerCAmelCase_ ) , """(0,1,0)""" )
def lowercase__ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
A__ : str =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_ ) )
def lowercase__ ( self : Tuple ) -> None:
'''simple docstring'''
A__ : int =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A__ : Union[str, Any] =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(lowerCAmelCase_ , lowerCAmelCase_ ) )
def lowercase__ ( self : Optional[Any] ) -> None:
'''simple docstring'''
A__ : Optional[int] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A__ : Any =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(lowerCAmelCase_ , lowerCAmelCase_ ) )
def lowercase__ ( self : List[Any] ) -> None:
'''simple docstring'''
A__ : str =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowercase__ ( self : Optional[int] ) -> None:
'''simple docstring'''
A__ : int =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
A__ : str =Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def lowercase__ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
A__ : str =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_ ) )
def lowercase__ ( self : str ) -> None:
'''simple docstring'''
A__ : Union[str, Any] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def lowercase__ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
A__ : int =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A__ : Tuple =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def lowercase__ ( self : Optional[Any] ) -> None:
'''simple docstring'''
A__ : int =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A__ : List[str] =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def lowercase__ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
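# --- Added cross-check (not part of the original test file). The determinant
# expected above can be verified independently with numpy (an extra dependency
# assumed only for this illustration).
import numpy as np
assert round(float(np.linalg.det(np.array([[1, 2, 3], [2, 4, 5], [6, 7, 8]])))) == -5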
| 687 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df, partition_order ) -> list:
"""simple docstring"""
A__ : Optional[int] =[]
for part_id in partition_order:
A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : str =spark.range(100 ).repartition(1 )
A__ : List[str] =Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Tuple =spark.range(10 ).repartition(2 )
A__ : List[str] =[1, 0]
A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions.
A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(10 ).repartition(1 )
A__ : List[str] =SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
        generator_mock.shuffle =lambda x : x.reverse()
A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] )
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Any =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : List[str] =spark.range(100 ).repartition(1 )
A__ : List[Any] =Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
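# --- Added illustration (not part of the original test file). The sharding
# rule the worker tests above expect appears to be round-robin: worker i of n
# takes every n-th partition, giving [0, 2] and [1, 3] for two workers over
# four partitions (stated as an assumption, not the library's contract).
_partitions = list(range(4))
assert _partitions[0::2] == [0, 2] and _partitions[1::2] == [1, 3]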
| 687 | 1 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling( arr: np.ndarray, size: int, stride: int ) -> np.ndarray:
    """simple docstring"""
    arr =np.array(arr )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
A__ : Dict =0
A__ : Optional[int] =0
A__ : str =0
A__ : Optional[Any] =0
# compute the shape of the output matrix
A__ : Tuple =(arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
A__ : str =np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
A__ : Any =np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A__ : str =0
A__ : List[str] =0
return updated_arr
def avgpooling( arr: np.ndarray, size: int, stride: int ) -> np.ndarray:
    """simple docstring"""
    arr =np.array(arr )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
A__ : Union[str, Any] =0
A__ : Optional[Any] =0
A__ : Tuple =0
A__ : str =0
# compute the shape of the output matrix
A__ : Optional[int] =(arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
A__ : Optional[int] =np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
A__ : Any =int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A__ : Dict =0
A__ : str =0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__snake_case : Any = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
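# --- Added worked example (not part of the original file). `maxpooling` on a
# 4x4 input with size=2 and stride=2 keeps one maximum per 2x2 block.
_demo = np.arange(16).reshape(4, 4)
assert maxpooling(_demo, size=2, stride=2).tolist() == [[5.0, 7.0], [13.0, 15.0]]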
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__snake_case : Union[str, Any] = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model ):
    """simple docstring"""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
        def __init__( self , module: nn.Module , rank: int ):
            '''simple docstring'''
            super().__init__()
            self.module =module
            self.adapter =nn.Sequential(
                nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
            small_std =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight , std=small_std )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )
        def forward( self , input , *args , **kwargs ):
            '''simple docstring'''
            return self.module(input , *args , **kwargs ) + self.adapter(input )
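    # --- Added illustration (not part of the original test file; assumes torch
    # is available, as guarded above). A self-contained demo of the low-rank
    # adapter idea: because the second projection starts at zero, the wrapped
    # layer is initially an exact identity over the base Linear.
    _base = nn.Linear(16, 16)
    _adapter = nn.Sequential(nn.Linear(16, 4, bias=False), nn.Linear(4, 16, bias=False))
    nn.init.zeros_(_adapter[1].weight)
    _x = torch.randn(2, 16)
    assert torch.allclose(_base(_x) + _adapter(_x), _base(_x))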
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'bigscience/bloom-1b7'
# Constant values
__snake_case = 2.109659552692574
__snake_case = 'Hello my name is'
__snake_case = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__snake_case = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
        with self.assertRaises(lowerCAmelCase_ ):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa )
        with self.assertRaises(lowerCAmelCase_ ):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0""" ) )
        with self.assertRaises(lowerCAmelCase_ ):
            # Tries casting the dtype with `float()`
            self.model_abit.float()
        with self.assertRaises(lowerCAmelCase_ ):
            # Tries casting the dtype with `half()`
            self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 687 | 1 |
'''simple docstring'''
def binomial_coefficient(n: int, k: int) -> int:
    """Return the binomial coefficient C(n, k)."""
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
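# Worked example (verifiable by hand): binomial_coefficient(5, 2) == 10, computed
# as (5 * 4) / (1 * 2); multiplying before dividing keeps every intermediate value
# an integer, since each partial product is itself a binomial coefficient.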
def catalan_number(node_count: int) -> int:
    """Return the n-th Catalan number, C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n! for non-negative n."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of distinct binary trees on `node_count` distinct nodes."""
    return catalan_number(node_count) * factorial(node_count)
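# Quick sanity check (illustrative, consistent with the functions above):
#   catalan_number(3)     -> 5   # five BST shapes on 3 nodes
#   factorial(3)          -> 6
#   binary_tree_count(3)  -> 30  # 5 shapes * 3! labelings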
if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 687 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : Optional[int] = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 687 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple:
'''simple docstring'''
A__ : Tuple =parent
A__ : Any =batch_size
A__ : List[str] =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_lengths
A__ : int =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : Optional[Any] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : List[Any] =causal
A__ : str =asm
A__ : Tuple =n_langs
A__ : Dict =vocab_size
A__ : Optional[Any] =n_special
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Optional[int] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : Any =num_labels
A__ : str =num_choices
A__ : Optional[int] =summary_type
A__ : int =use_proj
A__ : Tuple =scope
A__ : Union[str, Any] =bos_token_id
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Tuple =None
if self.use_input_lengths:
A__ : Tuple =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : Optional[Any] =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Any =None
A__ : Tuple =None
A__ : Optional[Any] =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float()
A__ : str =ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
A__ : List[Any] =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Tuple =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
A__ : Optional[Any] =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((A__) , ) : List[Any] =result_with_labels.to_tuple()
A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((A__) , ) : Tuple =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.num_choices
A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int:
'''simple docstring'''
A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Dict =XLMModelTester(self )
A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : Tuple =min_length + idx + 1
A__ : Tuple =min_length + idx + 1
A__ : Dict =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) )
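    # Illustration of the shape assertion above (made-up numbers): with batch_size=2,
    # num_beam_groups=1, 4 attention heads and min_length=5, the attentions recorded
    # at decoding step idx must all have shape (2, 4, 5 + idx + 1, 5 + idx + 1).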
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : str =min_length + idx + 1
A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , )
pass
@slow
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCAmelCase_ )
A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
A__ : Optional[Any] =[
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
| 687 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Tuple =backbone_config.get("""model_type""" )
A__ : List[str] =CONFIG_MAPPING[backbone_model_type]
A__ : Dict =config_class.from_dict(lowerCAmelCase_ )
A__ : int =use_timm_backbone
A__ : List[Any] =backbone_config
A__ : Optional[int] =num_channels
A__ : Optional[int] =num_queries
A__ : Union[str, Any] =d_model
A__ : Optional[int] =encoder_ffn_dim
A__ : Optional[Any] =encoder_layers
A__ : int =encoder_attention_heads
A__ : Optional[Any] =decoder_ffn_dim
A__ : Tuple =decoder_layers
A__ : Optional[Any] =decoder_attention_heads
A__ : Tuple =dropout
A__ : int =attention_dropout
A__ : Dict =activation_dropout
A__ : Union[str, Any] =activation_function
A__ : List[str] =init_std
A__ : str =init_xavier_std
A__ : int =encoder_layerdrop
A__ : List[Any] =decoder_layerdrop
A__ : Tuple =encoder_layers
A__ : Tuple =auxiliary_loss
A__ : List[Any] =position_embedding_type
A__ : int =backbone
A__ : Optional[int] =use_pretrained_backbone
A__ : str =dilation
# Hungarian matcher
A__ : Any =class_cost
A__ : str =bbox_cost
A__ : str =giou_cost
# Loss coefficients
A__ : Union[str, Any] =mask_loss_coefficient
A__ : int =dice_loss_coefficient
A__ : Union[str, Any] =cls_loss_coefficient
A__ : List[str] =bbox_loss_coefficient
A__ : str =giou_loss_coefficient
A__ : Optional[Any] =focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return self.d_model
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : str =self.backbone_config.to_dict()
A__ : int =self.__class__.model_type
return output
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11')
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return 12
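# What the ONNX export description above encodes (the class names are obfuscated in
# this dump, so take this as a hedged reading of the properties): dynamic input axes
#   {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'},
#    'pixel_mask':   {0: 'batch'}}
# a validation tolerance of 1e-5, and 12 as the default ONNX opset for export.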
| 687 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed python, numpy and torch RNGs so training runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
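# Typical call site (illustrative): invoke set_seed(42) once at the top of a training
# script, before models and dataloaders are constructed, so all RNG streams agree.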
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Optional[Any] =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : List[str] =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A__ : int =True
if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None:
A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Union[str, Any] =kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Optional[Any] =kwargs["""min_value"""]
A__ : Any =list(lowerCAmelCase_ )
A__ : int =[p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs["""device"""] )
A__ : Optional[int] =None
A__ : Any =decay
A__ : List[Any] =min_decay
A__ : Optional[int] =update_after_step
A__ : List[str] =use_ema_warmup
A__ : str =inv_gamma
A__ : Union[str, Any] =power
A__ : str =0
A__ : str =None # set in `step()`
A__ : List[str] =model_cls
A__ : Optional[int] =model_config
@classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
A__ : Optional[int] =self.model_cls.from_config(self.model_config )
A__ : Optional[Any] =self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power
else:
A__ : Union[str, Any] =(1 + step) / (10 + step)
A__ : str =min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
A__ : int =max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
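    # Numeric intuition for the warmup-free branch above: decay follows
    # (1 + step) / (10 + step), i.e. ~0.18 at step 1, ~0.92 at step 100 and
    # ~0.99 at step 1000, always clamped into [min_decay, self.decay].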
@torch.no_grad()
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Any =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : Optional[int] =parameters.parameters()
A__ : Dict =list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ : Any =self.get_decay(self.optimization_step )
A__ : Optional[int] =decay
A__ : List[str] =1 - decay
A__ : str =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None:
'''simple docstring'''
A__ : str =[
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def lowercase__ ( self : Optional[Any] ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : List[str] =[param.detach().cpu().clone() for param in parameters]
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ : List[str] =None
def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None:
'''simple docstring'''
A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ )
A__ : List[Any] =state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError("""Invalid min_decay""" )
A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError("""Invalid optimization_step""" )
A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError("""Invalid update_after_step""" )
A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
A__ : Tuple =state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ )
if shadow_params is not None:
A__ : List[str] =shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 687 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any]=13 , lowerCAmelCase_ : Optional[int]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : str=5 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Dict=5_12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Optional[Any]=0.02 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : Optional[Any]="last" , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Any=None , ) -> List[Any]:
'''simple docstring'''
A__ : Optional[Any] =parent
A__ : Tuple =batch_size
A__ : List[str] =seq_length
A__ : Optional[int] =is_training
A__ : Any =use_input_lengths
A__ : Dict =use_token_type_ids
A__ : int =use_labels
A__ : Optional[int] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : str =causal
A__ : List[Any] =asm
A__ : List[str] =n_langs
A__ : List[Any] =vocab_size
A__ : str =n_special
A__ : str =hidden_size
A__ : Any =num_hidden_layers
A__ : Optional[Any] =num_attention_heads
A__ : Tuple =hidden_dropout_prob
A__ : Optional[int] =attention_probs_dropout_prob
A__ : int =max_position_embeddings
A__ : Optional[int] =type_vocab_size
A__ : List[Any] =type_sequence_label_size
A__ : Dict =initializer_range
A__ : Tuple =num_labels
A__ : List[Any] =num_choices
A__ : int =summary_type
A__ : List[str] =use_proj
A__ : int =scope
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : str =random_attention_mask([self.batch_size, self.seq_length] )
A__ : int =None
if self.use_input_lengths:
A__ : int =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : str =None
if self.use_token_type_ids:
A__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Dict =None
A__ : List[Any] =None
A__ : Dict =None
if self.use_labels:
A__ : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Dict =ids_tensor([self.batch_size] , 2 ).float()
A__ : Union[str, Any] =ids_tensor([self.batch_size] , self.num_choices )
A__ : List[str] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowercase__ ( self : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : List[Any] =FlaubertModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[Any] =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Dict =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : int =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , ) -> int:
'''simple docstring'''
A__ : int =FlaubertWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
A__ : Any =FlaubertForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[Any] =model(lowerCAmelCase_ )
A__ : str =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Any =FlaubertForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] =model(lowerCAmelCase_ )
A__ : str =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
A__ : Tuple =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((A__) , ) : List[str] =result_with_labels.to_tuple()
A__ : Union[str, Any] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((A__) , ) : Optional[Any] =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , ) -> Any:
'''simple docstring'''
A__ : Optional[Any] =FlaubertForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ )
A__ : Union[str, Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> List[Any]:
'''simple docstring'''
A__ : Tuple =self.num_labels
A__ : List[Any] =FlaubertForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =self.num_choices
A__ : Optional[int] =FlaubertForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Dict =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] ) -> Optional[int]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
return True
return False
def lowercase__ ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str]=False ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
A__ : Tuple =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A__ : Union[str, Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : int =FlaubertModelTester(self )
A__ : int =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
A__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase_ )
@slow
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] =FlaubertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@slow
@require_torch_gpu
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
A__ : Optional[int] =True
A__ : Dict =model_class(config=lowerCAmelCase_ )
A__ : Union[str, Any] =self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =torch.jit.trace(
lowerCAmelCase_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , """traced_model.pt""" ) )
A__ : Optional[Any] =torch.jit.load(os.path.join(lowerCAmelCase_ , """traced_model.pt""" ) , map_location=lowerCAmelCase_ )
loaded(inputs_dict["""input_ids"""].to(lowerCAmelCase_ ) , inputs_dict["""attention_mask"""].to(lowerCAmelCase_ ) )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
A__ : int =FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
A__ : Dict =torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
A__ : List[Any] =model(lowerCAmelCase_ )[0]
A__ : Tuple =torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , lowerCAmelCase_ )
A__ : Any =torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 687 |
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data ( subreddit : str, limit : int = 1, age : str = "new", wanted_data : list | None = None ) -> dict:
    """simple docstring"""
    wanted_data =wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg =f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg )
    response =requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
    if response.status_code == 429:
        raise requests.HTTPError
    data =response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict ={}
    for id_ in range(limit ):
        data_dict[id_] ={
            item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
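# A minimal retry sketch for the rate-limit case above (the backoff constants
# and the wrapper name are illustrative assumptions, not part of the original script):
def get_subreddit_data_with_retry(subreddit, max_retries=3, **kwargs):
    import time

    for attempt in range(max_retries):
        try:
            return get_subreddit_data(subreddit, **kwargs)
        except requests.HTTPError:
            time.sleep(2**attempt)  # exponential backoff: 1s, 2s, 4s, ...
    raise requests.HTTPError("still rate limited after retries")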
| 687 | 1 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
    '`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
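# A minimal usage sketch of the replacement API: a decorator that retries the
# wrapped function with a halved batch size whenever it hits an out-of-memory
# error. The training loop below is an illustrative assumption (kept as a
# comment so this deprecation shim does not import from the package root):
#
# from accelerate import find_executable_batch_size
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # build dataloaders/model with `batch_size` and run one training pass
#
# train()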
| 687 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.AUTOTUNE
def parse_args ( ):
"""simple docstring"""
    parser =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
    args =parser.parse_args()
    return args
def initialize_tpu ( args ):
    """simple docstring"""
    try:
        if args.tpu_name:
            tpu =tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
        else:
            tpu =tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples ( file_list ):
    """simple docstring"""
    num_samples =0
    for file in file_list:
        filename =file.split("""/""" )[-1]
        sample_count =re.search(r"""-\d+-(\d+)\.tfrecord""", filename ).group(1 )
        num_samples += int(sample_count )
    return num_samples
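# For example (the shard names are illustrative), count_samples(["train-0-512.tfrecord",
# "train-1-512.tfrecord"]) returns 1024: the second number in each shard name is that
# shard's sample count, which is what the capture group in the regex above extracts.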
def prepare_dataset ( records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None ):
    """simple docstring"""
    num_samples =count_samples(records )
    dataset =tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset =dataset.shuffle(len(records ) )
    dataset =tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTOTUNE )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset =dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset =dataset.map(decode_fn, num_parallel_calls=AUTOTUNE )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset =dataset.shuffle(shuffle_buffer_size )
    dataset =dataset.batch(batch_size, drop_remainder=True )
    dataset =dataset.map(mask_fn, num_parallel_calls=AUTOTUNE )
    dataset =dataset.prefetch(AUTOTUNE )
    return dataset
def main ( args ):
    """simple docstring"""
    if not args.no_tpu:
        tpu =initialize_tpu(args )
        strategy =tf.distribute.TPUStrategy(tpu )
    else:
        strategy =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
    tokenizer =AutoTokenizer.from_pretrained(args.tokenizer )
    config =AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size =tokenizer.vocab_size
    training_records =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) )
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
    eval_records =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) )
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
    num_train_samples =count_samples(training_records )
    steps_per_epoch =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps =steps_per_epoch * args.num_epochs
    with strategy.scope():
        model =TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule =create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["""accuracy"""] )

    def decode_fn(example ):
        features ={
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,) ),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example, features )

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator =DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="""tf""" )

    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask =(
            ~tf.cast(batch["""attention_mask"""], tf.bool )
            | (batch["""input_ids"""] == tokenizer.cls_token_id)
            | (batch["""input_ids"""] == tokenizer.sep_token_id)
        )
        batch["""input_ids"""] , batch["""labels"""] =data_collator.tf_mask_tokens(
            batch["""input_ids"""], vocab_size=len(tokenizer ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size =args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset =prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset =prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )
    callbacks =[]
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer ) )
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
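# Example invocation (a sketch; the script filename and the GCS bucket paths are
# assumptions, not taken from this repository):
#
#   python train_mlm_on_tpu.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --bfloat16 \
#       --output_dir ./mlm-output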
| 687 | 1 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]=99 , lowerCAmelCase_ : Any=32 , lowerCAmelCase_ : Tuple=5 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : List[Any]=64 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[str]=5_12 , lowerCAmelCase_ : List[Any]=16 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Dict=0.02 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Union[str, Any]=1 , ) -> Tuple:
'''simple docstring'''
A__ : int =parent
A__ : Union[str, Any] =batch_size
A__ : Dict =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_mask
A__ : Tuple =use_token_type_ids
A__ : Tuple =use_labels
A__ : List[Any] =vocab_size
A__ : Any =hidden_size
A__ : str =num_hidden_layers
A__ : Tuple =num_attention_heads
A__ : List[str] =intermediate_size
A__ : Optional[Any] =hidden_act
A__ : Dict =hidden_dropout_prob
A__ : Tuple =attention_probs_dropout_prob
A__ : Optional[Any] =max_position_embeddings
A__ : Tuple =type_vocab_size
A__ : List[str] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : str =num_labels
A__ : Any =num_choices
A__ : Optional[int] =scope
A__ : Dict =q_groups
A__ : Any =k_groups
A__ : List[Any] =v_groups
A__ : List[str] =post_attention_groups
A__ : Dict =intermediate_groups
A__ : Dict =output_groups
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple =None
if self.use_input_mask:
A__ : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Optional[int] =None
A__ : str =None
A__ : Tuple =None
if self.use_labels:
A__ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Dict =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Any =ids_tensor([self.batch_size] , self.num_choices )
A__ : Optional[Any] =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Dict:
'''simple docstring'''
A__ : Optional[Any] =SqueezeBertModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
A__ : List[Any] =SqueezeBertForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] ) -> Dict:
'''simple docstring'''
A__ : List[Any] =SqueezeBertForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple ) -> List[str]:
'''simple docstring'''
A__ : List[str] =self.num_labels
A__ : List[str] =SqueezeBertForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] ) -> Any:
'''simple docstring'''
A__ : Optional[Any] =self.num_labels
A__ : Optional[Any] =SqueezeBertForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
A__ : int =self.num_choices
A__ : Optional[Any] =SqueezeBertForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Union[str, Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        ((config) , (input_ids) , (input_mask) , (sequence_labels) , (token_labels) , (choice_labels)) =config_and_inputs
        inputs_dict ={"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__snake_case = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = True
__snake_case = False
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =SqueezeBertModelTester(self )
A__ : int =ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
A__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
A__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCAmelCase_ )
@slow
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Dict =SqueezeBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
A__ : List[str] =SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
A__ : Dict =torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
A__ : List[Any] =model(lowerCAmelCase_ )[0]
A__ : int =torch.Size((1, 3) )
self.assertEqual(output.shape , lowerCAmelCase_ )
A__ : int =torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = None
__snake_case = BloomTokenizerFast
__snake_case = BloomTokenizerFast
__snake_case = True
__snake_case = False
__snake_case = 'tokenizer_file'
__snake_case = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
super().setUp()
A__ : Union[str, Any] =BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple , **lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
A__ : Any =self.get_rust_tokenizer()
A__ : List[str] =["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
A__ : int =[[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
A__ : Optional[int] =tokenizer.batch_encode_plus(lowerCAmelCase_ )["""input_ids"""]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : int=6 ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : Optional[int] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
A__ : Tuple ="""This is a simple input"""
A__ : Union[str, Any] =["""This is a simple input 1""", """This is a simple input 2"""]
A__ : List[str] =("""This is a simple input""", """This is a pair""")
A__ : Any =[
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(lowerCAmelCase_ , max_length=lowerCAmelCase_ )
tokenizer_r.encode_plus(lowerCAmelCase_ , max_length=lowerCAmelCase_ )
tokenizer_r.batch_encode_plus(lowerCAmelCase_ , max_length=lowerCAmelCase_ )
tokenizer_r.encode(lowerCAmelCase_ , max_length=lowerCAmelCase_ )
tokenizer_r.batch_encode_plus(lowerCAmelCase_ , max_length=lowerCAmelCase_ )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
A__ : int =None # Hotfixing padding = None
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" , )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" , )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
A__ : str =self.get_rust_tokenizer()
A__ : Tuple =load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=lowerCAmelCase_ )
A__ : int =next(iter(lowerCAmelCase_ ) )["""premise"""] # pick up one data
A__ : Optional[int] =list(sample_data.values() )
A__ : Tuple =list(map(tokenizer.encode , lowerCAmelCase_ ) )
A__ : Optional[Any] =[tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) for x in output_tokens]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraint. This test of the parent class would fail since it
        # relies on the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 687 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
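# A sketch of the imported helper, inferred from its use above (an assumption;
# the real implementation lives in build_directory_md.py). Kept as a comment so
# it does not shadow the import at the top of this script:
#
# from collections.abc import Iterator
#
# def good_file_paths(top_dir: str = ".") -> Iterator[str]:
#     for dir_path, dir_names, filenames in os.walk(top_dir):
#         # skip hidden, cache, and tooling directories
#         dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
#         for filename in filenames:
#             if filename == "__init__.py":
#                 continue
#             if os.path.splitext(filename)[1] in (".py", ".ipynb"):
#                 yield os.path.join(dir_path, filename).lstrip("./")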
| 687 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
        self.block_size =10
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[int] =[1, 2, 3, 4]
A__ : Dict =[1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowerCAmelCase_ , self.block_size , 0 ) , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Tuple =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
A__ : Union[str, Any] =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase_ , self.block_size , 0 ) , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[Any] =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
A__ : Optional[int] =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase_ , self.block_size , 0 ) , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] ="""It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
A__ , A__ : Optional[Any] =process_story(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , [] )
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
A__ : Any =""""""
A__ , A__ : List[str] =process_story(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , [] )
self.assertEqual(lowerCAmelCase_ , [] )
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
A__ : Optional[int] =(
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
A__ , A__ : Union[str, Any] =process_story(lowerCAmelCase_ )
A__ : List[str] =[
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Optional[Any] =["""It was the best of times."""]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =torch.tensor([1, 2, 3, 4] )
A__ : Optional[Any] =torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase_ , 0 ).numpy() , expected.numpy() )
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[int] =torch.tensor([1, 2, 3, 4, 23, 23, 23] )
A__ : List[str] =torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase_ , 23 ).numpy() , expected.numpy() )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
A__ : List[Any] =torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A__ : List[str] =torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase_ , 1 ).numpy() , expected.numpy() )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
A__ : str =1_01
A__ : str =torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
A__ : Optional[int] =torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A__ : List[str] =compute_token_type_ids(lowerCAmelCase_ , lowerCAmelCase_ )
np.testing.assert_array_equal(lowerCAmelCase_ , lowerCAmelCase_ )
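# Minimal reference implementations consistent with the expectations pinned down
# by the tests above (a sketch; the real versions live in utils_summarization.py,
# so these are kept as comments rather than shadowing the imports):
#
# def truncate_or_pad(sequence, block_size, pad_token_id):
#     if len(sequence) > block_size:
#         return sequence[:block_size]
#     return sequence + [pad_token_id] * (block_size - len(sequence))
#
# def build_mask(sequence, pad_token_id):
#     mask = torch.ones_like(sequence)
#     mask[sequence == pad_token_id] = 0  # zero out positions holding the pad token
#     return mask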
| 687 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def create_rename_keys ( config, base_model=False ):
"""simple docstring"""
    rename_keys =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v ( state_dict, config, base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix =""""""
        else:
            prefix ="""vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] =in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] =in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] =in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] =in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] =in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] =in_proj_bias[-config.hidden_size :]
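# Worked example of the split above (a standalone sketch, not used by the
# conversion itself): for ViT-Base, hidden_size is 768 and the fused timm
# projection has shape (3 * 768, 768); the three consecutive row blocks are
# the query, key and value weights.
def _demo_qkv_split(hidden_size : int = 768 ):
    in_proj_weight =torch.randn(3 * hidden_size, hidden_size )
    q =in_proj_weight[:hidden_size, :]
    k =in_proj_weight[hidden_size : hidden_size * 2, :]
    v =in_proj_weight[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)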
def remove_classification_head_ ( state_dict ):
    """simple docstring"""
    ignore_keys =["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key ( dct, old, new ):
    """simple docstring"""
    val =dct.pop(old )
    dct[new] =val
def prepare_img ( ):
    """simple docstring"""
    url ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
    im =Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint ( model_name, pytorch_dump_folder_path, base_model=True ):
    """simple docstring"""
    config =ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size =8
    # set labels if required
    if not base_model:
        config.num_labels =1_000
        repo_id ="""huggingface/label-files"""
        filename ="""imagenet-1k-id2label.json"""
        id2label =json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ) )
        id2label ={int(k ): v for k, v in id2label.items()}
        config.id2label =id2label
        config.label2id ={v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size =384
        config.intermediate_size =1_536
        config.num_hidden_layers =12
        config.num_attention_heads =6
    # load original model from torch hub
    original_model =torch.hub.load("""facebookresearch/dino:main""", model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict =original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys =create_rename_keys(config, base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
    # load HuggingFace model
    if base_model:
        model =ViTModel(config, add_pooling_layer=False ).eval()
    else:
        model =ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor =ViTImageProcessor()
    encoding =image_processor(images=prepare_img(), return_tensors="""pt""" )
    pixel_values =encoding["""pixel_values"""]
    outputs =model(pixel_values )
    if base_model:
        final_hidden_state_cls_token =original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1 )
    else:
        logits =original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__snake_case : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
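# Example invocation (the script filename and output path are assumptions):
#
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino-vitb16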
| 687 | 1 |
'''simple docstring'''
import os
import sys
import unittest
__snake_case : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__snake_case : Optional[int] = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
__snake_case : List[str] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
A__ : Optional[Any] =get_test_to_tester_mapping(lowerCAmelCase_ )
A__ : int =get_test_to_tester_mapping(lowerCAmelCase_ )
A__ : List[Any] ={"""BertModelTest""": """BertModelTester"""}
A__ : Any ={
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
A__ : Tuple =get_model_to_test_mapping(lowerCAmelCase_ )
A__ : List[str] =get_model_to_test_mapping(lowerCAmelCase_ )
A__ : Optional[Any] ={
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A__ : int ={
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : int =get_model_to_tester_mapping(lowerCAmelCase_ )
A__ : List[str] =get_model_to_tester_mapping(lowerCAmelCase_ )
A__ : List[str] ={
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A__ : Optional[Any] ={
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
| 687 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class SchedulerType ( Enum ):
    '''simple docstring'''
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule ( optimizer : Optimizer, last_epoch : int = -1 ) -> LambdaLR:
    """simple docstring"""
    return LambdaLR(optimizer, lambda _ : 1, last_epoch=last_epoch )
def get_constant_schedule_with_warmup ( optimizer : Optimizer, num_warmup_steps : int, last_epoch : int = -1 ) -> LambdaLR:
    """simple docstring"""

    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0, num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch )
def get_piecewise_constant_schedule ( optimizer : Optimizer, step_rules : str, last_epoch : int = -1 ) -> LambdaLR:
    """simple docstring"""
    rules_dict ={}
    rule_list =step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str , value =rule_str.split(""":""" )
        steps =int(value_str )
        value =float(value )
        rules_dict[steps] =value
    last_lr_multiple =float(rule_list[-1] )

    def create_rules_function(rules_dict, last_lr_multiple ):
        def rule_func(steps : int ) -> float:
            sorted_steps =sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func =create_rules_function(rules_dict, last_lr_multiple )
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch )
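# Usage sketch for the rule string (the optimizer construction is an
# illustrative assumption):
#
#   scheduler = get_piecewise_constant_schedule(optimizer, step_rules="1:10,20:0.5,0.1")
#
# multiplies the base lr by 10 until step 1, by 0.5 until step 20, and by 0.1
# for every later step; the final comma-separated value is the lasting multiplier.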
def get_linear_schedule_with_warmup ( optimizer, num_warmup_steps, num_training_steps, last_epoch=-1 ):
    """simple docstring"""

    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1, num_warmup_steps ) )
        return max(
            0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer, lr_lambda, last_epoch )
def get_cosine_schedule_with_warmup ( optimizer : Optimizer, num_warmup_steps : int, num_training_steps : int, num_cycles : float = 0.5, last_epoch : int = -1 ) -> LambdaLR:
    """simple docstring"""

    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1, num_warmup_steps ) )
        progress =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer, lr_lambda, last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup ( optimizer : Optimizer, num_warmup_steps : int, num_training_steps : int, num_cycles : int = 1, last_epoch : int = -1 ) -> LambdaLR:
    """simple docstring"""

    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1, num_warmup_steps ) )
        progress =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer, lr_lambda, last_epoch )
def get_polynomial_decay_schedule_with_warmup ( optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 ):
    """simple docstring"""
    lr_init =optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})" )

    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1, num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range =lr_init - lr_end
            decay_steps =num_training_steps - num_warmup_steps
            pct_remaining =1 - (current_step - num_warmup_steps) / decay_steps
            decay =lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler ( name : Union[str, SchedulerType], optimizer : Optimizer, step_rules : Optional[str] = None, num_warmup_steps : Optional[int] = None, num_training_steps : Optional[int] = None, num_cycles : int = 1, power : float = 1.0, last_epoch : int = -1, ):
    """simple docstring"""
    name =SchedulerType(name )
    schedule_func =TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch )
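# A minimal end-to-end usage sketch (the model, optimizer, and step counts are
# illustrative assumptions):
def _demo_get_scheduler():
    import torch

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
    scheduler = get_scheduler("""linear""", optimizer, num_warmup_steps=100, num_training_steps=1_000)
    for _ in range(1_000):
        optimizer.step()   # update parameters first ...
        scheduler.step()   # ... then advance the lr schedule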
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : str, __snake_case : list[str] ) -> str:
"""simple docstring"""
A__ : Optional[Any] =""""""
for word_or_phrase in separated:
if not isinstance(__snake_case, __snake_case ):
raise Exception("""join() accepts only strings to be joined""" )
        joined += word_or_phrase + separator
    # strip the trailing separator that the loop appends after the last element
    return joined.strip(__snake_case )
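# Sketch (added for illustration): the function above mirrors str.join with an
# added per-element type check; a plain-Python equivalent for comparison.
# `_join_reference` is a hypothetical name, not part of the original file.
def _join_reference(separator: str, separated: list[str]) -> str:
    if not all(isinstance(word, str) for word in separated):
        raise Exception("""join() accepts only strings to be joined""")
    return separator.join(separated)

assert _join_reference("_", ["a", "b", "c"]) == "a_b_c"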
if __name__ == "__main__":
from doctest import testmod
testmod()
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
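# Minimal sketch (added) of the lazy-import idea the module above relies on:
# defer the real import until first attribute access. `_LazyDemo` is an
# illustrative stand-in, not the actual `_LazyModule` implementation.
import importlib

class _LazyDemo:
    def __init__(self, module_name: str) -> None:
        self._name = module_name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

_math = _LazyDemo("math")
assert _math.sqrt(9) == 3.0  # "math" is only imported on this first access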
| 687 | 1 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def __lowerCamelCase ( __snake_case : str = "laptop" ) -> DataFrame:
"""simple docstring"""
A__ : str =f"https://www.amazon.in/laptop/s?k={product}"
    A__ : Dict ={
        """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36""",
        """Accept-Language""": """en-US, en;q=0.5""",
    }
A__ : List[str] =BeautifulSoup(requests.get(__snake_case, headers=__snake_case ).text )
# Initialize a Pandas dataframe with the column titles
A__ : Union[str, Any] =DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""", attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""}, ), soup.find_all("""div""", attrs={"""class""": """a-row a-size-base a-color-base"""} ), ):
try:
A__ : Tuple =item.ha.text
A__ : Optional[Any] ="""https://www.amazon.in/""" + item.ha.a["""href"""]
A__ : Any =item.find("""span""", attrs={"""class""": """a-offscreen"""} ).text
try:
A__ : Tuple =item.find("""span""", attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
A__ : Optional[Any] ="""Not available"""
try:
A__ : Tuple =(
"""₹"""
+ item.find(
"""span""", attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
A__ : Union[str, Any] =""""""
try:
A__ : Optional[int] =float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""", """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""", """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""", """""" ) )
)
* 100 )
except ValueError:
A__ : Optional[int] =float("""nan""" )
except AttributeError:
pass
A__ : List[str] =[
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A__ : Any =""" """
A__ : Optional[int] =""" """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__snake_case : Tuple = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
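    # Worked example (added): the discount formula used above -- an MRP of 1000
    # and a current price of 750 give (1000 - 750) / 1000 * 100 = 25.0 percent off.
    _mrp, _price = 1000.0, 750.0
    assert (_mrp - _price) / _mrp * 100 == 25.0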
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting"""
A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Optional[Any] =jax.random.PRNGKey(0 )
A__ : List[str] =50
A__ : List[str] =jax.device_count()
A__ : List[str] =num_samples * [prompt]
A__ : List[str] =num_samples * [init_image]
A__ : Tuple =num_samples * [mask_image]
A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# shard inputs and rng
A__ : Dict =replicate(lowerCAmelCase_ )
A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() )
A__ : List[Any] =shard(lowerCAmelCase_ )
A__ : Union[str, Any] =shard(lowerCAmelCase_ )
A__ : str =shard(lowerCAmelCase_ )
A__ : List[str] =pipeline(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ )
A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 )
A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1]
A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ : Optional[int] =jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
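# Sketch (added): what the `shard` calls above do to the leading batch axis --
# split it evenly across local devices. A numpy stand-in; the device count and
# shapes below are arbitrary illustration values.
import numpy as _np

_batch = _np.zeros((8, 4))
_n_devices = 2
_sharded = _batch.reshape(_n_devices, _batch.shape[0] // _n_devices, 4)
assert _sharded.shape == (2, 4, 4)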
| 687 | 1 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__snake_case : int = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = ['pixel_values']
def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : int = 8 , **lowerCAmelCase_ : str , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
A__ : Optional[int] =do_rescale
A__ : List[Any] =rescale_factor
A__ : Any =do_pad
A__ : List[Any] =pad_size
def lowercase__ ( self : Any , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[Any] ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None ) -> Any:
'''simple docstring'''
A__ , A__ : Tuple =get_image_size(lowerCAmelCase_ )
A__ : Optional[int] =(old_height // size + 1) * size - old_height
A__ : List[Any] =(old_width // size + 1) * size - old_width
return pad(lowerCAmelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] =do_rescale if do_rescale is not None else self.do_rescale
A__ : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
A__ : List[str] =do_pad if do_pad is not None else self.do_pad
A__ : Union[str, Any] =pad_size if pad_size is not None else self.pad_size
A__ : List[Any] =make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
A__ : Optional[int] =[to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_rescale:
A__ : Dict =[self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_pad:
A__ : int =[self.pad(lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
A__ : Union[str, Any] =[to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
A__ : Any ={"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
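# Worked example (added): the pad arithmetic in the class above always rounds
# *up* to the next multiple of `size`; note that an edge already divisible by
# `size` still gains one full extra block, exactly as the formula is written.
_old_height, _size = 17, 8
assert (_old_height // _size + 1) * _size - _old_height == 7
assert (16 // _size + 1) * _size - 16 == 8  # aligned input still pads a full block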
| 687 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'conditional_detr'
__snake_case = ['past_key_values']
__snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Tuple =backbone_config.get("""model_type""" )
A__ : List[str] =CONFIG_MAPPING[backbone_model_type]
A__ : Dict =config_class.from_dict(lowerCAmelCase_ )
A__ : int =use_timm_backbone
A__ : List[Any] =backbone_config
A__ : Optional[int] =num_channels
A__ : Optional[int] =num_queries
A__ : Union[str, Any] =d_model
A__ : Optional[int] =encoder_ffn_dim
A__ : Optional[Any] =encoder_layers
A__ : int =encoder_attention_heads
A__ : Optional[Any] =decoder_ffn_dim
A__ : Tuple =decoder_layers
A__ : Optional[Any] =decoder_attention_heads
A__ : Tuple =dropout
A__ : int =attention_dropout
A__ : Dict =activation_dropout
A__ : Union[str, Any] =activation_function
A__ : List[str] =init_std
A__ : str =init_xavier_std
A__ : int =encoder_layerdrop
A__ : List[Any] =decoder_layerdrop
A__ : Tuple =encoder_layers
A__ : Tuple =auxiliary_loss
A__ : List[Any] =position_embedding_type
A__ : int =backbone
A__ : Optional[int] =use_pretrained_backbone
A__ : str =dilation
# Hungarian matcher
A__ : Any =class_cost
A__ : str =bbox_cost
A__ : str =giou_cost
# Loss coefficients
A__ : Union[str, Any] =mask_loss_coefficient
A__ : int =dice_loss_coefficient
A__ : Union[str, Any] =cls_loss_coefficient
A__ : List[str] =bbox_loss_coefficient
A__ : str =giou_loss_coefficient
A__ : Optional[Any] =focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return self.d_model
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : str =self.backbone_config.to_dict()
A__ : int =self.__class__.model_type
return output
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = version.parse('1.11' )
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return 12
| 687 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__snake_case : List[Any] = object()
# For specifying empty leaf dict `{}`
__snake_case : Any = object()
def __lowerCamelCase ( __snake_case : Any, __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
A__ : List[Any] =tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(__snake_case ) - len(__snake_case ) + 1 ):
A__ : List[str] =[x.match(__snake_case ) for x, y in zip(__snake_case, ks[i:] )]
if matches and all(__snake_case ):
return True
return False
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
def replace(__snake_case : List[str], __snake_case : List[Any] ):
for rule, replacement in rules:
if _match(__snake_case, __snake_case ):
return replacement
return val
return replace
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""", __snake_case )),
(("transformer", "wte", "embedding"), P("""mp""", __snake_case )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__snake_case, """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""", __snake_case )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__snake_case, """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""", __snake_case )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __lowerCamelCase ( __snake_case : List[str] ) -> str:
"""simple docstring"""
A__ : List[Any] =_get_partition_rules()
A__ : Dict =_replacement_rules(__snake_case )
A__ : Optional[Any] ={k: _unmatched for k in flatten_dict(__snake_case )}
A__ : Optional[Any] ={k: replace(__snake_case, __snake_case ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__snake_case ) )
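# Illustration (added): a compact standalone restatement of the window-matching
# rule used by the partition helpers above -- each pattern element is an anchored
# regex tried against every contiguous window of the flattened key tuple.
import re as _re

def _match_demo(patterns: tuple, key: tuple) -> bool:
    compiled = tuple(_re.compile(p + "$") for p in patterns)
    for i in range(len(key) - len(compiled) + 1):
        window = key[i : i + len(compiled)]
        if all(rx.match(k) for rx, k in zip(compiled, window)):
            return True
    return False

assert _match_demo(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match_demo(("attention", "out_proj", "bias"), ("mlp", "c_fc", "bias"))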
| 687 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 'bit'
__snake_case = ['preactivation', 'bottleneck']
__snake_case = ['SAME', 'VALID']
def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ : List[Any] =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
A__ : List[Any] =num_channels
A__ : Tuple =embedding_size
A__ : Union[str, Any] =hidden_sizes
A__ : List[str] =depths
A__ : Optional[Any] =layer_type
A__ : int =hidden_act
A__ : int =global_padding
A__ : int =num_groups
A__ : str =drop_path_rate
A__ : str =embedding_dynamic_padding
A__ : Dict =output_stride
A__ : Optional[int] =width_factor
A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
| 687 | 1 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[Any]=None ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =start
A__ : Optional[Any] =end
A__ : Optional[Any] =val
A__ : List[str] =(start + end) // 2
A__ : Tuple =left
A__ : Union[str, Any] =right
def __repr__( self : Dict ) -> int:
'''simple docstring'''
return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase_ : Sequence , lowerCAmelCase_ : Tuple ) -> Any:
'''simple docstring'''
A__ : List[str] =collection
A__ : Tuple =function
if self.collection:
A__ : Union[str, Any] =self._build_tree(0 , len(lowerCAmelCase_ ) - 1 )
def lowercase__ ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
self._update_tree(self.root , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : str ) -> Optional[int]:
'''simple docstring'''
return self._query_range(self.root , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
if start == end:
return SegmentTreeNode(lowerCAmelCase_ , lowerCAmelCase_ , self.collection[start] )
A__ : Optional[int] =(start + end) // 2
A__ : Dict =self._build_tree(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Tuple =self._build_tree(mid + 1 , lowerCAmelCase_ )
return SegmentTreeNode(lowerCAmelCase_ , lowerCAmelCase_ , self.fn(left.val , right.val ) , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if node.start == i and node.end == i:
A__ : Any =val
return
if i <= node.mid:
self._update_tree(node.left , lowerCAmelCase_ , lowerCAmelCase_ )
else:
self._update_tree(node.right , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Dict =self.fn(node.left.val , node.right.val )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowerCAmelCase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , lowerCAmelCase_ ) , )
else:
# range in right child tree
return self._query_range(node.right , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
if self.root is not None:
A__ : int =Queue()
queue.put(self.root )
while not queue.empty():
A__ : str =queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
__snake_case : Optional[int] = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 687 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__snake_case : List[str] = 5_0003
__snake_case : Dict = 5_0002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PLBartTokenizer
__snake_case = None
__snake_case = False
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )]
self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ )
A__ : Dict =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Tuple =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'uclanlp/plbart-python-en_XX'
__snake_case = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__snake_case = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__snake_case = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : Optional[int] ) -> str:
'''simple docstring'''
A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A__ : Optional[Any] =1
return cls
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
A__ : str =10
A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ : Tuple =tempfile.mkdtemp()
A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
A__ : Optional[int] =self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[Any] =targets["""input_ids"""]
A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
| 687 | 1 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__snake_case : Optional[int] = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
__snake_case = None
__snake_case = None
class lowerCamelCase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
__snake_case = datasets.Audio()
__snake_case = 'audio'
__snake_case = AudioFolderConfig
__snake_case = 42 # definition at the bottom of the script
__snake_case = AudioClassification(audio_column='audio' , label_column='label' )
__snake_case : int = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
__snake_case : List[str] = AUDIO_EXTENSIONS
| 687 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =generator.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[str] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int ) -> int:
"""simple docstring"""
A__ : list[list[int]] =[[0 for _ in range(__snake_case )] for _ in range(m + 1 )]
for i in range(m + 1 ):
A__ : List[Any] =1
for n in range(m + 1 ):
for k in range(1, __snake_case ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
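# Worked example (added): the memo table above computes the integer partition
# numbers p(m); a compact reference implementation for cross-checking values:
def _partition_ref(m: int) -> int:
    ways = [1] + [0] * m  # ways[0] = 1: the empty partition
    for part in range(1, m + 1):
        for total in range(part, m + 1):
            ways[total] += ways[total - part]
    return ways[m]

assert [_partition_ref(n) for n in (1, 2, 3, 4, 5)] == [1, 2, 3, 5, 7]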
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__snake_case : List[Any] = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__snake_case : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 687 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
A__ : Optional[Any] =Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels
A__ : Union[str, Any] =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ )
A__ : Tuple =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
# pass init params to Decoder
A__ : Optional[Any] =Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , )
@apply_forward_hook
def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
A__ : Dict =self.encoder(lowerCAmelCase_ )
A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase_ )
@apply_forward_hook
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
# also go through quantization layer
if not force_not_quantize:
A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ )
else:
A__ : List[str] =h
A__ : Dict =self.post_quant_conv(lowerCAmelCase_ )
A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
A__ : Optional[int] =sample
A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents
A__ : Tuple =self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
| 687 | 1 |
'''simple docstring'''
from collections import defaultdict
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
A__ : int =[
[-1 for i in range(total + 1 )] for j in range(2 ** len(lowerCAmelCase_ ) )
]
A__ : Dict =defaultdict(lowerCAmelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
A__ : Union[str, Any] =(1 << len(lowerCAmelCase_ )) - 1
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
        # if mask == self.final_mask, all persons have been assigned a task; return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
A__ : List[str] =self.count_ways_until(lowerCAmelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
A__ : Dict =total_ways_util
return self.dp[mask][task_no]
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Any:
'''simple docstring'''
# Store the list of persons for each task
for i in range(len(lowerCAmelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(lowerCAmelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case : List[Any] = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case : Union[str, Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
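# Worked count (added): enumerating by hand, person 2 takes task 3 or 4 and the
# other two then pick distinct tasks from their lists (5 ways in each branch),
# giving 5 + 5 = 10 -- the value the DP above prints.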
| 687 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Tuple = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__snake_case : str = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__snake_case : List[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =set()
A__ : Optional[int] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ : str =char
A__ : List[Any] =set(__snake_case )
return pairs
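# Illustration (added): for the word "hello", get_pairs above yields the set of
# adjacent symbol pairs that drives the BPE merge loop further down.
_word = tuple("hello")
_pairs = {(_word[i], _word[i + 1]) for i in range(len(_word) - 1)}
assert _pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}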
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : int =vocab_file
A__ : Any =merges_file
A__ : Union[str, Any] ={}
A__ : Optional[int] =0
A__ : List[Any] =1
A__ : Tuple =2
A__ : Dict =3
self.add_from_file(lowerCAmelCase_ )
A__ : List[str] ={v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
A__ : str =merges_handle.read().split("""\n""" )[:-1]
A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges]
A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Dict ={}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : Dict =[self.cls_token_id]
A__ : Union[str, Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[self.sep_token_id]
A__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ : int =tuple(lowerCAmelCase_ )
A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A__ : Tuple =get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : Tuple =bigram
A__ : Optional[int] =[]
A__ : Tuple =0
while i < len(lowerCAmelCase_ ):
try:
A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Union[str, Any] =j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Dict =tuple(lowerCAmelCase_ )
A__ : Dict =new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
A__ : str =get_pairs(lowerCAmelCase_ )
A__ : Dict ="""@@ """.join(lowerCAmelCase_ )
A__ : Tuple =word[:-4]
A__ : Any =word
return word
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
A__ : int =[]
A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Optional[Any] =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.merges_file , lowerCAmelCase_ )
return out_vocab_file, out_merge_file
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
A__ : Union[str, Any] =f.readlines()
for lineTmp in lines:
A__ : List[Any] =lineTmp.strip()
A__ : Dict =line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
A__ : Tuple =line[:idx]
A__ : Tuple =len(self.encoder )
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int = 100 ) -> int:
"""simple docstring"""
A__ : Tuple =n * (n + 1) * (2 * n + 1) / 6
A__ : Optional[Any] =(n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 687 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int:
"""simple docstring"""
A__ : Union[str, Any] =nn.functional.normalize(__snake_case )
A__ : Optional[Any] =nn.functional.normalize(__snake_case )
return torch.mm(__snake_case, normalized_text_embeds.t() )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = CLIPConfig
__snake_case = ['CLIPEncoderLayer']
def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase_ )
A__ : str =CLIPVisionModel(config.vision_config )
A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ )
A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ )
A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ )
@torch.no_grad()
def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any:
'''simple docstring'''
A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : Any =self.visual_projection(lowerCAmelCase_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy()
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy()
A__ : List[str] =[]
A__ : Optional[int] =image_embeds.shape[0]
for i in range(lowerCAmelCase_ ):
A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : List[Any] =0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ : Optional[Any] =special_cos_dist[i][concept_idx]
A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item()
A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
A__ : Dict =0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ : Optional[int] =cos_dist[i][concept_idx]
A__ : List[str] =self.concept_embeds_weights[concept_idx].item()
A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase_ )
result.append(lowerCAmelCase_ )
A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : List[Any] =self.visual_projection(lowerCAmelCase_ )
A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds )
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : Dict =0.0
A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 )
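        # Images flagged for "special care" get every concept threshold lowered
        # by 0.01, making the NSFW check stricter for them.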
A__ : Tuple =special_care * 0.01
A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int = 10, __snake_case : int = 22 ) -> int:
"""simple docstring"""
A__ : str =range(1, __snake_case )
A__ : Optional[int] =range(1, __snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 687 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =[]
for part_id in partition_order:
A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : str =spark.range(100 ).repartition(1 )
A__ : List[str] =Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Tuple =spark.range(10 ).repartition(2 )
A__ : List[str] =[1, 0]
A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions.
A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(10 ).repartition(1 )
A__ : List[str] =SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
A__ : Tuple =lambda __snake_case : x.reverse()
A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] )
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Any =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : List[str] =spark.range(100 ).repartition(1 )
A__ : List[Any] =Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 687 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowerCamelCase :
'''simple docstring'''
def __init__( self : List[Any] ) -> None:
'''simple docstring'''
A__ : Optional[int] =[2, 1, 2, -1]
A__ : Dict =[1, 2, 3, 4]
def lowercase__ ( self : List[str] ) -> list[float]:
'''simple docstring'''
A__ : Dict =len(self.first_signal )
A__ : Optional[int] =len(self.second_signal )
A__ : int =max(lowerCAmelCase_ , lowerCAmelCase_ )
# create a zero matrix of max_length x max_length
A__ : List[Any] =[[0] * max_length for i in range(lowerCAmelCase_ )]
        # pad the shorter signal with zeros so both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
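        # Build a circulant matrix: row i holds the second signal rotated by i,
        # so multiplying its transpose by the first signal yields the circular
        # convolution.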
for i in range(lowerCAmelCase_ ):
A__ : Dict =deque(self.second_signal )
rotated_signal.rotate(lowerCAmelCase_ )
for j, item in enumerate(lowerCAmelCase_ ):
matrix[i][j] += item
# multiply the matrix with the first signal
A__ : int =np.matmul(np.transpose(lowerCAmelCase_ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCAmelCase_ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( __snake_case : list[int], __snake_case : list[int], __snake_case : list[int], __snake_case : list[list[str]], __snake_case : int, ) -> None:
"""simple docstring"""
A__ : Union[str, Any] =len(__snake_case )
    # If row is equal to the size of the board it means there is a queen in
    # each row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all valid placements in this row
for col in range(__snake_case ):
        # We apply what we learned previously. First we check that the current
        # board (possible_board) does not already contain this column value,
        # because a repeated value would mean a vertical collision. Then we
        # apply the two formulas we learned before:
        #
        # 45º:  y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b.
        #
        # And we verify that the results of these two formulas do not already
        # exist in their respective variables
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], __snake_case, __snake_case, )
def __lowerCamelCase ( __snake_case : int ) -> None:
"""simple docstring"""
A__ : list[list[str]] =[]
depth_first_search([], [], [], __snake_case, __snake_case )
# Print all the boards
for board in boards:
for column in board:
print(__snake_case )
print("""""" )
print(len(__snake_case ), """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 687 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
super().__init__()
A__ : Union[str, Any] =module
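        # LoRA-style adapter: down-project to `rank` dimensions, then back up to
        # the module's output size; its output is added to the frozen module's.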
A__ : Union[str, Any] =nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , )
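        # Small-std normal init for the down-projection and zeros for the
        # up-projection, so the adapter starts out as a no-op.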
A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'bigscience/bloom-1b7'
# Constant values
__snake_case = 2.109659552692574
__snake_case = 'Hello my name is'
__snake_case = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__snake_case = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt2-xl'
__snake_case = 3.3191854854152187
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : str, __snake_case : int ) -> list[str]:
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(__snake_case ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 687 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> None:
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 687 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'roberta'
def __init__( self : List[str] , lowerCAmelCase_ : Optional[Any]=5_02_65 , lowerCAmelCase_ : Optional[int]=7_68 , lowerCAmelCase_ : Optional[int]=12 , lowerCAmelCase_ : List[str]=12 , lowerCAmelCase_ : List[Any]=30_72 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : List[str]=5_12 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Optional[Any]=0.02 , lowerCAmelCase_ : str=1e-12 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Tuple="absolute" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : List[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : List[Any] =vocab_size
A__ : int =hidden_size
A__ : Tuple =num_hidden_layers
A__ : int =num_attention_heads
A__ : List[str] =hidden_act
A__ : int =intermediate_size
A__ : List[Any] =hidden_dropout_prob
A__ : Any =attention_probs_dropout_prob
A__ : List[Any] =max_position_embeddings
A__ : Any =type_vocab_size
A__ : Any =initializer_range
A__ : Optional[Any] =layer_norm_eps
A__ : List[str] =position_embedding_type
A__ : Tuple =use_cache
A__ : Optional[int] =classifier_dropout
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
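        # Dynamic axes mark the input dimensions that may vary at export time;
        # multiple-choice inputs carry an extra `choice` dimension.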
if self.task == "multiple-choice":
A__ : Union[str, Any] ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A__ : List[str] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 687 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple:
'''simple docstring'''
A__ : Tuple =parent
A__ : Any =batch_size
A__ : List[str] =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_lengths
A__ : int =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : Optional[Any] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : List[Any] =causal
A__ : str =asm
A__ : Tuple =n_langs
A__ : Dict =vocab_size
A__ : Optional[Any] =n_special
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Optional[int] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : Any =num_labels
A__ : str =num_choices
A__ : Optional[int] =summary_type
A__ : int =use_proj
A__ : Tuple =scope
A__ : Union[str, Any] =bos_token_id
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Tuple =None
if self.use_input_lengths:
A__ : Tuple =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : Optional[Any] =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Any =None
A__ : Tuple =None
A__ : Optional[Any] =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float()
A__ : str =ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
A__ : List[Any] =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Tuple =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
A__ : Optional[Any] =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((A__) , ) : List[Any] =result_with_labels.to_tuple()
A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((A__) , ) : Tuple =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.num_choices
A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : Dict =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ : Optional[int] =config_and_inputs
A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__snake_case = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int:
'''simple docstring'''
A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Dict =XLMModelTester(self )
A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : Tuple =min_length + idx + 1
A__ : Tuple =min_length + idx + 1
A__ : Dict =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : str =min_length + idx + 1
A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , )
pass
@slow
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCAmelCase_ )
A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
A__ : Optional[Any] =[
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
| 687 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
__snake_case : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__snake_case = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__snake_case = field(
default=lowercase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(default=lowercase_ , metadata={'help': 'The input training data file (a text file).'} )
__snake_case = field(
default=lowercase_ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__snake_case = field(
default=lowercase_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__snake_case = field(
default=lowercase_ , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case = field(
default=lowercase_ , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
__snake_case = field(
default=lowercase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__snake_case = field(
default=lowercase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
if self.train_file is not None:
A__ : Dict =self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ : List[Any] =self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = 42
__snake_case = True
__snake_case = None
__snake_case = None
def __call__( self : List[str] , lowerCAmelCase_ : int ) -> Optional[Any]:
'''simple docstring'''
A__ : str ="""label""" if """label""" in features[0].keys() else """labels"""
A__ : str =[feature.pop(lowerCAmelCase_ ) for feature in features]
A__ : Any =len(lowerCAmelCase_ )
A__ : List[str] =len(features[0]["""input_ids"""] )
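        # Flatten the (batch, num_choices) structure so the tokenizer can pad
        # all candidate sequences as a single batch.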
A__ : Union[str, Any] =[
[{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase_ )] for feature in features
]
A__ : List[str] =list(chain(*lowerCAmelCase_ ) )
A__ : List[str] =self.tokenizer.pad(
lowerCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
A__ : Tuple ={k: v.view(lowerCAmelCase_ , lowerCAmelCase_ , -1 ) for k, v in batch.items()}
# Add back labels
A__ : Optional[int] =torch.tensor(lowerCAmelCase_ , dtype=torch.intaa )
return batch
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ : Tuple =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ : int =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""", __snake_case, __snake_case )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ : Tuple =training_args.get_process_log_level()
logger.setLevel(__snake_case )
datasets.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
A__ : Tuple =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ : List[str] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ : Union[str, Any] ={}
if data_args.train_file is not None:
A__ : Optional[Any] =data_args.train_file
if data_args.validation_file is not None:
A__ : Union[str, Any] =data_args.validation_file
A__ : List[Any] =data_args.train_file.split(""".""" )[-1]
A__ : Optional[int] =load_dataset(
__snake_case, data_files=__snake_case, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
A__ : List[str] =load_dataset(
"""swag""", """regular""", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ : Optional[Any] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
A__ : Optional[int] =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
A__ : Dict =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=__snake_case, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ : str =[f"ending{i}" for i in range(4 )]
A__ : Optional[int] ="""sent1"""
A__ : Optional[int] ="""sent2"""
if data_args.max_seq_length is None:
A__ : List[Any] =tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
A__ : Any =1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
A__ : Optional[Any] =min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
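    # Each SWAG example expands into 4 (context, ending) pairs, one per candidate ending.
    # The pairs are flattened for tokenization, then regrouped into (num_examples, 4, seq_length).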
def preprocess_function(__snake_case : Dict ):
A__ : List[Any] =[[context] * 4 for context in examples[context_name]]
A__ : Union[str, Any] =examples[question_header_name]
A__ : Dict =[
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(__snake_case )
]
# Flatten out
A__ : Union[str, Any] =list(chain(*__snake_case ) )
A__ : Any =list(chain(*__snake_case ) )
# Tokenize
A__ : List[str] =tokenizer(
__snake_case, __snake_case, truncation=__snake_case, max_length=__snake_case, padding="""max_length""" if data_args.pad_to_max_length else False, )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(__snake_case ), 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
A__ : Optional[int] =raw_datasets["""train"""]
if data_args.max_train_samples is not None:
A__ : Optional[int] =min(len(__snake_case ), data_args.max_train_samples )
A__ : Tuple =train_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
A__ : Union[str, Any] =train_dataset.map(
__snake_case, batched=__snake_case, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
A__ : int =raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
A__ : Dict =min(len(__snake_case ), data_args.max_eval_samples )
A__ : List[str] =eval_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
A__ : int =eval_dataset.map(
__snake_case, batched=__snake_case, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
A__ : List[Any] =(
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__snake_case, pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(__snake_case : List[str] ):
A__ , A__ : List[str] =eval_predictions
A__ : str =np.argmax(__snake_case, axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ : Optional[int] =Trainer(
model=__snake_case, args=__snake_case, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=__snake_case, data_collator=__snake_case, compute_metrics=__snake_case, )
# Training
if training_args.do_train:
A__ : Optional[Any] =None
if training_args.resume_from_checkpoint is not None:
A__ : List[str] =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ : Any =last_checkpoint
A__ : str =trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ : List[Any] =train_result.metrics
A__ : List[Any] =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
A__ : Union[str, Any] =min(__snake_case, len(__snake_case ) )
trainer.log_metrics("""train""", __snake_case )
trainer.save_metrics("""train""", __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A__ : List[str] =trainer.evaluate()
A__ : Dict =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
A__ : int =min(__snake_case, len(__snake_case ) )
trainer.log_metrics("""eval""", __snake_case )
trainer.save_metrics("""eval""", __snake_case )
A__ : Dict ={
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def __lowerCamelCase ( __snake_case : Any ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 687 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCamelCase ( __snake_case : int ) -> Optional[int]:
"""simple docstring"""
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Optional[Any] =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : List[str] =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A__ : int =True
if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None:
A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Union[str, Any] =kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Optional[Any] =kwargs["""min_value"""]
A__ : Any =list(lowerCAmelCase_ )
A__ : int =[p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs["""device"""] )
A__ : Optional[int] =None
A__ : Any =decay
A__ : List[Any] =min_decay
A__ : Optional[int] =update_after_step
A__ : List[str] =use_ema_warmup
A__ : str =inv_gamma
A__ : Union[str, Any] =power
A__ : str =0
A__ : str =None # set in `step()`
A__ : List[str] =model_cls
A__ : Optional[int] =model_config
@classmethod
def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel":
'''simple docstring'''
A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase_ )
return ema_model
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
A__ : Optional[int] =self.model_cls.from_config(self.model_config )
A__ : Optional[Any] =self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
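        # With warmup, decay ramps from 0 toward 1 as 1 - (1 + step / inv_gamma) ** -power.
        # Without warmup it follows (1 + step) / (10 + step); e.g. step=10 gives 11/20 = 0.55.
        # The result is then clamped into [min_decay, self.decay].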
if self.use_ema_warmup:
A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power
else:
A__ : Union[str, Any] =(1 + step) / (10 + step)
A__ : str =min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
A__ : int =max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Any =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : Optional[int] =parameters.parameters()
A__ : Dict =list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ : Any =self.get_decay(self.optimization_step )
A__ : Optional[int] =decay
A__ : List[str] =1 - decay
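        # Standard EMA update: shadow = decay * shadow + (1 - decay) * param, applied
        # in-place below as shadow -= (1 - decay) * (shadow - param).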
A__ : str =contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None:
'''simple docstring'''
A__ : str =[
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def lowercase__ ( self : Optional[Any] ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : List[str] =[param.detach().cpu().clone() for param in parameters]
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ : List[str] =None
def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None:
'''simple docstring'''
A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ )
A__ : List[Any] =state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError("""Invalid min_decay""" )
A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError("""Invalid optimization_step""" )
A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError("""Invalid update_after_step""" )
A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
A__ : Tuple =state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ )
if shadow_params is not None:
A__ : List[str] =shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 687 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]=13 , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Tuple=99 , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=5 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : List[Any]=37 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Union[str, Any]=5_12 , lowerCAmelCase_ : Optional[Any]=16 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[int]=4 , ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =parent
A__ : Tuple =batch_size
A__ : str =seq_length
A__ : int =is_training
A__ : Union[str, Any] =use_attention_mask
A__ : Optional[int] =use_token_type_ids
A__ : Dict =use_labels
A__ : Dict =vocab_size
A__ : Any =hidden_size
A__ : Optional[Any] =num_hidden_layers
A__ : Dict =num_attention_heads
A__ : Dict =intermediate_size
A__ : Tuple =hidden_act
A__ : Dict =hidden_dropout_prob
A__ : str =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Tuple =type_vocab_size
A__ : List[str] =type_sequence_label_size
A__ : Any =initializer_range
A__ : Tuple =num_choices
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
A__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Any =None
if self.use_attention_mask:
A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Optional[int] =None
if self.use_token_type_ids:
A__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : int =AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
A__ : List[str] =self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ : int =config_and_inputs
A__ : int ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ : Any =FlaxAlbertModelTester(self )
@slow
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A__ : List[Any] =model_class_name.from_pretrained("""albert-base-v2""" )
A__ : int =model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase_ )
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[Any] =FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
A__ : Optional[Any] =np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
A__ : str =np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
A__ : Union[str, Any] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
A__ : Optional[Any] =(1, 11, 7_68)
self.assertEqual(output.shape , lowerCAmelCase_ )
A__ : str =np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) )
| 687 |
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case : Union[str, Any] = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict:
"""simple docstring"""
A__ : Union[str, Any] =wanted_data or []
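    # Reject any requested field that is not a known Reddit post attribute.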
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}"
raise ValueError(__snake_case )
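    # Reddit tends to rate-limit the default `requests` User-Agent, so a custom one is sent.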
A__ : Tuple =requests.get(
f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
if response.status_code == 429:
raise requests.HTTPError
A__ : Tuple =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
A__ : Tuple ={}
for id_ in range(__snake_case ):
A__ : List[Any] ={
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 687 | 1 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def __lowerCamelCase ( __snake_case : list[int], __snake_case : list[int], __snake_case : int ) -> list[int]:
"""simple docstring"""
A__ : List[str] =[0] * no_of_processes
A__ : int =[0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__snake_case ):
A__ : int =burst_time[i]
A__ : List[str] =0
A__ : Tuple =0
A__ : List[str] =999_999_999
A__ : Optional[int] =0
A__ : int =False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__snake_case ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
A__ : Tuple =remaining_time[j]
A__ : Any =j
A__ : List[Any] =True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
A__ : Optional[int] =remaining_time[short]
if minm == 0:
A__ : Dict =999_999_999
if remaining_time[short] == 0:
complete += 1
A__ : Optional[int] =False
# Find finish time of current process
A__ : Any =increment_time + 1
# Calculate waiting time
A__ : int =finish_time - arrival_time[short]
A__ : Tuple =finar - burst_time[short]
if waiting_time[short] < 0:
A__ : List[str] =0
# Increment time
increment_time += 1
return waiting_time
def __lowerCamelCase ( __snake_case : list[int], __snake_case : int, __snake_case : list[int] ) -> list[int]:
"""simple docstring"""
A__ : str =[0] * no_of_processes
for i in range(__snake_case ):
A__ : int =burst_time[i] + waiting_time[i]
return turn_around_time
def __lowerCamelCase ( __snake_case : list[int], __snake_case : list[int], __snake_case : int ) -> None:
"""simple docstring"""
A__ : str =0
A__ : Optional[int] =0
for i in range(__snake_case ):
A__ : int =total_waiting_time + waiting_time[i]
A__ : Any =total_turn_around_time + turn_around_time[i]
print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}" )
print("""Average turn around time =""", total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
__snake_case : Tuple = int(input())
__snake_case : int = [0] * no_of_processes
__snake_case : Union[str, Any] = [0] * no_of_processes
__snake_case : Optional[int] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
    print('Enter the arrival time and burst time for process ' + str(i + 1) + ':')
__snake_case , __snake_case : int = map(int, input().split())
__snake_case : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case : Optional[int] = burst_time
__snake_case : str = no_of_processes
__snake_case : Union[str, Any] = waiting_time
__snake_case : Union[str, Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__snake_case : Any = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
| 687 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
A__ : Optional[Any] =parser.parse_args()
return args
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
if args.tpu_name:
A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__snake_case )
tf.tpu.experimental.initialize_tpu_system(__snake_case )
return tpu
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Any =0
for file in file_list:
A__ : Optional[int] =file.split("""/""" )[-1]
A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 )
A__ : str =int(__snake_case )
num_samples += sample_count
return num_samples
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] =count_samples(__snake_case )
A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case )
if shuffle:
A__ : Optional[int] =dataset.shuffle(len(__snake_case ) )
A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) )
A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case )
if shuffle:
assert shuffle_buffer_size is not None
A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size )
A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case )
A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case )
A__ : Tuple =dataset.prefetch(__snake_case )
return dataset
def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not args.no_tpu:
A__ : Dict =initialize_tpu(__snake_case )
A__ : int =tf.distribute.TPUStrategy(__snake_case )
else:
A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer )
A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
A__ : Optional[Any] =count_samples(__snake_case )
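    # The global batch size is the per-replica batch size times the number of replicas,
    # so this is the number of global batches per pass over the training set.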
A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A__ : str =steps_per_epoch * args.num_epochs
with strategy.scope():
A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A__ , A__ : Optional[Any] =create_optimizer(
num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__snake_case, metrics=["""accuracy"""] )
def decode_fn(__snake_case : Tuple ):
A__ : Dict ={
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__snake_case, __snake_case )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A__ : List[Any] =DataCollatorForLanguageModeling(
tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" )
def mask_with_collator(__snake_case : Optional[int] ):
# TF really needs an isin() function
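        # Positions that must never be masked for MLM: padding plus the CLS and SEP tokens.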
A__ : Union[str, Any] =(
~tf.cast(batch["""attention_mask"""], tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
A__ , A__ : List[str] =data_collator.tf_mask_tokens(
batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, )
return batch
A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, )
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, )
A__ : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) )
model.fit(
__snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__snake_case : str = parse_args()
main(args)
| 687 | 1 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def __lowerCamelCase ( __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if hor == 128:
A__ : List[str] =("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
A__ : str =(32, 128, 256)
A__ : List[str] =("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
A__ : Optional[int] =("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
A__ : Dict =(32, 64, 128, 256)
A__ : Tuple =("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
A__ : Optional[Any] =torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
A__ : int =model.state_dict()
A__ : Any ={
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 65_536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
A__ : Optional[int] =UNetaDModel(**__snake_case )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
A__ : Dict =dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ : List[str] =state_dict.pop(__snake_case )
hf_value_function.load_state_dict(__snake_case )
torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", """w""" ) as f:
json.dump(__snake_case, __snake_case )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[Any] ={
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 65_536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
A__ : Optional[int] =torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
A__ : List[Any] =model
A__ : Optional[Any] =UNetaDModel(**__snake_case )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
A__ : Optional[int] =dict(zip(state_dict.keys(), hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ : Tuple =state_dict.pop(__snake_case )
hf_value_function.load_state_dict(__snake_case )
torch.save(hf_value_function.state_dict(), """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""", """w""" ) as f:
json.dump(__snake_case, __snake_case )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case : Union[str, Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
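# The heavy torch-backed classes are only imported on first attribute access via the lazy module.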
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__snake_case : Optional[int] = None
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Optional[Any] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__snake_case : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['input_ids', 'attention_mask']
__snake_case = TaTokenizer
__snake_case = []
def __init__( self : Dict , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Tuple="</s>" , lowerCAmelCase_ : Dict="<unk>" , lowerCAmelCase_ : Optional[int]="<pad>" , lowerCAmelCase_ : Union[str, Any]=1_00 , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : Any , ) -> str:
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
A__ : List[str] =[f"<extra_id_{i}>" for i in range(lowerCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
A__ : Optional[Any] =len(set(filter(lambda lowerCAmelCase_ : bool("""extra_id_""" in str(lowerCAmelCase_ ) ) , lowerCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , extra_ids=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : Union[str, Any] =vocab_file
A__ : Any =False if not self.vocab_file else True
A__ : str =extra_ids
@staticmethod
def lowercase__ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
A__ : Optional[Any] =TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , lowerCAmelCase_ , )
return max_model_length
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Any =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
logger.info(f"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
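        # T5 expects every sequence to end with the EOS token (</s>), appended here.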
A__ : int =token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
A__ : Optional[int] =token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
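        # T5 does not use token type ids, so the mask is all zeros across both sequences.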
A__ : Optional[int] =[self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return list(
set(filter(lambda lowerCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , lowerCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) )
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return [self.convert_tokens_to_ids(lowerCAmelCase_ ) for token in self.get_sentinel_tokens()]
| 687 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__snake_case : int = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__snake_case : Optional[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__snake_case : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__snake_case : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
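# A non-zero count makes the script exit with a failing status so automated checks flag it.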
if bad_files:
import sys
sys.exit(bad_files)
| 687 | 1 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__snake_case : List[str] = logging.getLogger()
__snake_case : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Any ) -> int:
'''simple docstring'''
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
A__ : List[str] ={"""source""": """What is love ?""", """target""": """life"""}
A__ : List[Any] ={"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
A__ : Optional[int] ="""\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowerCAmelCase_ , f"{split}.{field}" ) , """w""" ) as f:
f.write(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : str = "pytorch" ) -> Tuple:
'''simple docstring'''
A__ : int =self.get_auto_remove_tmp_dir()
A__ : Union[str, Any] =os.path.join(lowerCAmelCase_ , """output""" )
A__ : Optional[Any] =os.path.join(lowerCAmelCase_ , """data""" )
self._create_dummy_data(data_dir=lowerCAmelCase_ )
A__ : List[str] =f"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n ".split()
if gpus > 0:
testargs.append(f"--gpus={gpus}" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
A__ : int =[sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowerCAmelCase_ , env=self.get_env() )
A__ : Dict =os.path.join(lowerCAmelCase_ , """metrics.json""" )
with open(lowerCAmelCase_ ) as f:
A__ : Optional[int] =json.load(lowerCAmelCase_ )
return result
@require_torch_gpu
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A__ : Tuple =self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
A__ : Dict =self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
A__ : List[str] =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ : str =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 687 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str:
"""simple docstring"""
A__ : int =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any =""""""
else:
A__ : Optional[int] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
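        # The fused qkv weight has shape (3 * hidden_size, hidden_size): the first third is
        # the query projection, the middle third the key, and the last third the value.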
A__ : Optional[int] =in_proj_weight[
: config.hidden_size, :
]
A__ : str =in_proj_bias[: config.hidden_size]
A__ : Optional[Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] =in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] =in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =dct.pop(__snake_case )
A__ : Tuple =val
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str:
"""simple docstring"""
A__ : Tuple =ViTConfig()
# patch_size
if model_name[-1] == "8":
A__ : Optional[Any] =8
# set labels if required
if not base_model:
A__ : Optional[Any] =1_000
A__ : str ="""huggingface/label-files"""
A__ : Any ="""imagenet-1k-id2label.json"""
A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : List[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A__ : str =384
A__ : Optional[Any] =1_536
A__ : Optional[Any] =12
A__ : Union[str, Any] =6
# load original model from torch hub
A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : List[str] =original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
if base_model:
A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval()
else:
A__ : List[str] =ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
A__ : Union[str, Any] =ViTImageProcessor()
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Union[str, Any] =encoding["""pixel_values"""]
A__ : Union[str, Any] =model(__snake_case )
if base_model:
A__ : List[str] =original_model(__snake_case )
assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
A__ : Optional[int] =original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__snake_case : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 687 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = 42
__snake_case = None
# Automatically constructed
__snake_case = "dict"
__snake_case = None
__snake_case = field(default='Translation' , init=lowercase_ , repr=lowercase_ )
def __call__( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase__ ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = None
# Automatically constructed
__snake_case = "dict"
__snake_case = None
__snake_case = field(default='TranslationVariableLanguages' , init=lowercase_ , repr=lowercase_ )
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
A__ : Tuple =sorted(set(self.languages ) ) if self.languages else None
A__ : str =len(self.languages ) if self.languages else None
def __call__( self : List[str] ) -> List[str]:
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =set(self.languages )
if self.languages and set(lowerCAmelCase_ ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(lowerCAmelCase_ ) - lang_set ) )}) are not in valid set ({', '.join(lowerCAmelCase_ )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
A__ : str =[]
for lang, text in translation_dict.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
A__ , A__ : Union[str, Any] =zip(*sorted(lowerCAmelCase_ ) )
return {"language": languages, "translation": translations}
def lowercase__ ( self : Dict ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
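# --- Illustrative sketch (editor's addition) ---
# A minimal, self-contained version of the flattening performed above:
# one-to-many translations expand into parallel, sorted lists. The input dict
# is made up for illustration.
def _demo_flatten(translation_dict):
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}

# _demo_flatten({"en": "the cat", "fr": ["le chat", "la chatte"]})
# -> {"language": ["en", "fr", "fr"], "translation": ["the cat", "la chatte", "le chat"]}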
| 687 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(rules_dict, last_lr_multiple ):
    def rule_func(steps : int ) -> float:
        sorted_steps =sorted(rules_dict.keys() )
        for i, sorted_step in enumerate(sorted_steps ):
            if steps < sorted_step:
                return rules_dict[sorted_steps[i]]
        return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
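# --- Illustrative sketch (editor's addition) ---
# Semantics of the `step_rules` string parsed above, with made-up values:
# "10:1.0,30:0.1,0.01" means multiplier 1.0 for steps < 10, 0.1 for steps < 30,
# then 0.01 for the rest of training (the trailing entry has no step prefix).
def _demo_step_rules():
    rules = {10: 1.0, 30: 0.1}
    last_multiple = 0.01

    def multiplier(step):
        for threshold in sorted(rules):
            if step < threshold:
                return rules[threshold]
        return last_multiple

    assert [multiplier(s) for s in (0, 9, 10, 29, 30, 100)] == [1.0, 1.0, 0.1, 0.1, 0.01, 0.01]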
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
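# --- Illustrative sketch (editor's addition) ---
# Numeric shape of the linear schedule above with 2 warmup steps out of 10
# total: the multiplier ramps 0 -> 1 during warmup, then decays linearly to 0.
def _demo_linear_multiplier(step, num_warmup_steps=2, num_training_steps=10):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

# [_demo_linear_multiplier(s) for s in range(11)]
# -> [0.0, 0.5, 1.0, 0.875, 0.75, 0.625, 0.5, 0.375, 0.25, 0.125, 0.0]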
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =SchedulerType(__snake_case )
A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__snake_case, last_epoch=__snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, )
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
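# --- Illustrative usage (editor's addition) ---
# In upstream transformers this dispatcher is exposed as `get_scheduler`; a
# typical call (optimizer construction assumed) looks like:
#
#     lr_scheduler = get_scheduler(
#         "linear", optimizer=optimizer, num_warmup_steps=100, num_training_steps=1_000
#     )
#     ...
#     lr_scheduler.step()  # once per optimizer step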
| 687 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = None
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = None
__snake_case = None
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = True
__snake_case = None
__snake_case = 1
__snake_case = None
__snake_case = False
__snake_case = None
__snake_case = None
def lowercase__ ( self : List[str] ) -> "DownloadConfig":
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(lowerCAmelCase_ ) for k, v in self.__dict__.items()} )
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
print("""Making key files...""" )
make_key_files("""rsa""", 1_024 )
print("""Key files generation successful.""" )
def __lowerCamelCase ( __snake_case : int ) -> tuple[tuple[int, int], tuple[int, int]]:
"""simple docstring"""
print("""Generating prime p...""" )
A__ : Optional[Any] =rabinMiller.generate_large_prime(__snake_case )
print("""Generating prime q...""" )
A__ : Any =rabinMiller.generate_large_prime(__snake_case )
A__ : str =p * q
print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
while True:
A__ : List[Any] =random.randrange(2 ** (key_size - 1), 2 ** (key_size) )
if cryptoMath.gcd(__snake_case, (p - 1) * (q - 1) ) == 1:
break
print("""Calculating d that is mod inverse of e...""" )
A__ : Tuple =cryptoMath.find_mod_inverse(__snake_case, (p - 1) * (q - 1) )
A__ : int =(n, e)
A__ : Optional[Any] =(n, d)
return (public_key, private_key)
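# --- Illustrative sketch (editor's addition) ---
# Round trip with a textbook-sized key of the form produced above (p=61, q=53,
# so n=3233, phi=3120, e=17, d=2753): encrypt with pow(m, e, n), decrypt with
# pow(c, d, n). Real keys come from generate_key(1024).
def _demo_rsa_roundtrip(message=65):
    n, e, d = 3233, 17, 2753
    cipher = pow(message, e, n)  # -> 2790
    assert pow(cipher, d, n) == message
    return cipher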
def __lowerCamelCase ( __snake_case : str, __snake_case : int ) -> None:
"""simple docstring"""
if os.path.exists(f"{name}_pubkey.txt" ) or os.path.exists(f"{name}_privkey.txt" ):
print("""\nWARNING:""" )
print(
f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
A__ , A__ : Union[str, Any] =generate_key(__snake_case )
print(f"\nWriting public key to file {name}_pubkey.txt..." )
with open(f"{name}_pubkey.txt", """w""" ) as out_file:
out_file.write(f"{key_size},{public_key[0]},{public_key[1]}" )
print(f"Writing private key to file {name}_privkey.txt..." )
with open(f"{name}_privkey.txt", """w""" ) as out_file:
out_file.write(f"{key_size},{private_key[0]},{private_key[1]}" )
if __name__ == "__main__":
main()
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case : Tuple = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__snake_case : str = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__snake_case : Union[str, Any] = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Optional[int]=False ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[Any] =compute_bleu(
reference_corpus=lowerCAmelCase_ , translation_corpus=lowerCAmelCase_ , max_order=lowerCAmelCase_ , smooth=lowerCAmelCase_ )
((A__) , (A__) , (A__) , (A__) , (A__) , (A__)) : str =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
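# --- Illustrative sketch (editor's addition) ---
# The brevity penalty folded into the BLEU score above: 1.0 when the candidate
# is at least as long as the reference, exp(1 - ref_len / cand_len) otherwise.
def _demo_brevity_penalty(candidate_length, reference_length):
    import math

    if candidate_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / candidate_length)

# _demo_brevity_penalty(9, 10) -> ~0.8948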
| 687 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting"""
A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Optional[Any] =jax.random.PRNGKey(0 )
A__ : List[str] =50
A__ : List[str] =jax.device_count()
A__ : List[str] =num_samples * [prompt]
A__ : List[str] =num_samples * [init_image]
A__ : Tuple =num_samples * [mask_image]
A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# shard inputs and rng
A__ : Dict =replicate(lowerCAmelCase_ )
A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() )
A__ : List[Any] =shard(lowerCAmelCase_ )
A__ : Union[str, Any] =shard(lowerCAmelCase_ )
A__ : str =shard(lowerCAmelCase_ )
A__ : List[str] =pipeline(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ )
A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 )
A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1]
A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ : Optional[int] =jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 687 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
A__ : Optional[Any] =parser.parse_args()
return args
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
if args.tpu_name:
A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__snake_case )
tf.tpu.experimental.initialize_tpu_system(__snake_case )
return tpu
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Any =0
for file in file_list:
A__ : Optional[int] =file.split("""/""" )[-1]
A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 )
A__ : str =int(__snake_case )
num_samples += sample_count
return num_samples
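# --- Illustrative sketch (editor's addition) ---
# The regex above recovers the per-shard sample count that the shard-writing
# script encodes in filenames; the filename below is hypothetical.
def _demo_shard_count():
    import re

    filename = "wikitext-00001-52000.tfrecord"
    return int(re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1))  # -> 52000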
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] =count_samples(__snake_case )
A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case )
if shuffle:
A__ : Optional[int] =dataset.shuffle(len(__snake_case ) )
A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) )
A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case )
if shuffle:
assert shuffle_buffer_size is not None
A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size )
A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case )
A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case )
A__ : Tuple =dataset.prefetch(__snake_case )
return dataset
def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not args.no_tpu:
A__ : Dict =initialize_tpu(__snake_case )
A__ : int =tf.distribute.TPUStrategy(__snake_case )
else:
A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer )
A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
A__ : Optional[Any] =count_samples(__snake_case )
A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A__ : str =steps_per_epoch * args.num_epochs
with strategy.scope():
A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A__ , A__ : Optional[Any] =create_optimizer(
num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__snake_case, metrics=["""accuracy"""] )
def decode_fn(__snake_case : Tuple ):
A__ : Dict ={
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__snake_case, __snake_case )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A__ : List[Any] =DataCollatorForLanguageModeling(
tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" )
def mask_with_collator(__snake_case : Optional[int] ):
# TF really needs an isin() function
A__ : Union[str, Any] =(
~tf.cast(batch["""attention_mask"""], tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
A__ , A__ : List[str] =data_collator.tf_mask_tokens(
batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, )
return batch
A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, )
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, )
A__ : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) )
model.fit(
__snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__snake_case : str = parse_args()
main(args)
| 687 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'conditional_detr'
__snake_case = ['past_key_values']
__snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Tuple =backbone_config.get("""model_type""" )
A__ : List[str] =CONFIG_MAPPING[backbone_model_type]
A__ : Dict =config_class.from_dict(lowerCAmelCase_ )
A__ : int =use_timm_backbone
A__ : List[Any] =backbone_config
A__ : Optional[int] =num_channels
A__ : Optional[int] =num_queries
A__ : Union[str, Any] =d_model
A__ : Optional[int] =encoder_ffn_dim
A__ : Optional[Any] =encoder_layers
A__ : int =encoder_attention_heads
A__ : Optional[Any] =decoder_ffn_dim
A__ : Tuple =decoder_layers
A__ : Optional[Any] =decoder_attention_heads
A__ : Tuple =dropout
A__ : int =attention_dropout
A__ : Dict =activation_dropout
A__ : Union[str, Any] =activation_function
A__ : List[str] =init_std
A__ : str =init_xavier_std
A__ : int =encoder_layerdrop
A__ : List[Any] =decoder_layerdrop
A__ : Tuple =encoder_layers
A__ : Tuple =auxiliary_loss
A__ : List[Any] =position_embedding_type
A__ : int =backbone
A__ : Optional[int] =use_pretrained_backbone
A__ : str =dilation
# Hungarian matcher
A__ : Any =class_cost
A__ : str =bbox_cost
A__ : str =giou_cost
# Loss coefficients
A__ : Union[str, Any] =mask_loss_coefficient
A__ : int =dice_loss_coefficient
A__ : Union[str, Any] =cls_loss_coefficient
A__ : List[str] =bbox_loss_coefficient
A__ : str =giou_loss_coefficient
A__ : Optional[Any] =focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return self.d_model
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : int =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : str =self.backbone_config.to_dict()
A__ : int =self.__class__.model_type
return output
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = version.parse('1.11' )
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return 12
| 687 | 1 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__snake_case : int = logging.getLogger(__name__)
def __lowerCamelCase ( __snake_case : torch.nn.Module, __snake_case : BnbQuantizationConfig, __snake_case : Union[str, os.PathLike] = None, __snake_case : Optional[Dict[str, Union[int, str, torch.device]]] = None, __snake_case : Optional[List[str]] = None, __snake_case : Optional[Dict[Union[int, str], Union[int, str]]] = None, __snake_case : Optional[Union[str, os.PathLike]] = None, __snake_case : bool = False, ) -> Any:
"""simple docstring"""
A__ : str =bnb_quantization_config.load_in_abit
A__ : str =bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A__ : Union[str, Any] =[]
# custom device map
if isinstance(__snake_case, __snake_case ) and len(device_map.keys() ) > 1:
A__ : List[str] =[key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A__ : Any =get_keys_to_not_convert(__snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__snake_case )
A__ : Optional[int] =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A__ : Optional[Any] =[]
A__ : List[str] =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__snake_case )
# compatibility with peft
A__ : Optional[int] =load_in_abit
A__ : Union[str, Any] =load_in_abit
A__ : Any =get_parameter_device(__snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A__ : List[Any] =replace_with_bnb_layers(__snake_case, __snake_case, modules_to_not_convert=__snake_case )
# convert param to the right dtype
A__ : int =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A__ : int =name.replace(""".weight""", """""" ).replace(""".bias""", """""" )
A__ : Any =getattr(__snake_case, __snake_case, __snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__snake_case ):
param.to(__snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f"The model device type is {model_device.type}. However, cuda is needed for quantization."
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
A__ : Dict =replace_with_bnb_layers(
__snake_case, __snake_case, modules_to_not_convert=__snake_case )
A__ : Tuple =get_quantized_model_device_map(
__snake_case, __snake_case, __snake_case, max_memory=__snake_case, no_split_module_classes=__snake_case, )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A__ : Tuple =True
A__ : Any =any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
__snake_case, __snake_case, __snake_case, dtype=bnb_quantization_config.torch_dtype, offload_folder=__snake_case, offload_state_dict=__snake_case, keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules, offload_abit_bnb=load_in_abit and offload, )
return dispatch_model(__snake_case, device_map=__snake_case, offload_dir=__snake_case )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str], __snake_case : Union[str, Any]=None, __snake_case : str=None, __snake_case : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A__ : List[str] ={"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(__snake_case, __snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A__ : int ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A__ : Dict ={}
A__ : Union[str, Any] =special_dtypes
A__ : Optional[Any] =no_split_module_classes
A__ : Any =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A__ : Union[str, Any] =get_balanced_memory(
__snake_case, low_zero=(device_map == """balanced_low_0"""), max_memory=__snake_case, **__snake_case, )
A__ : List[Any] =max_memory
A__ : List[Any] =infer_auto_device_map(__snake_case, **__snake_case )
if isinstance(__snake_case, __snake_case ):
# check if don't have any quantized module on the cpu
A__ : Tuple =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A__ : Optional[int] ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __lowerCamelCase ( __snake_case : str, __snake_case : Optional[Any], __snake_case : List[Any]=None, __snake_case : Tuple=None ) -> List[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A__ : int =[]
A__ , A__ : Optional[int] =_replace_with_bnb_layers(
__snake_case, __snake_case, __snake_case, __snake_case )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Tuple, __snake_case : Dict=None, __snake_case : Tuple=None, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =False
for name, module in model.named_children():
if current_key_name is None:
A__ : str =[]
current_key_name.append(__snake_case )
if isinstance(__snake_case, nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A__ : Optional[int] =""".""".join(__snake_case )
A__ : Dict =True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A__ : str =False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A__ : Optional[int] =bnb.nn.LinearabitLt(
module.in_features, module.out_features, module.bias is not None, has_fpaa_weights=__snake_case, threshold=bnb_quantization_config.llm_inta_threshold, )
elif bnb_quantization_config.load_in_abit:
A__ : Union[str, Any] =bnb.nn.Linearabit(
module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_abit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant, quant_type=bnb_quantization_config.bnb_abit_quant_type, )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A__ : Optional[int] =module.weight.data
if module.bias is not None:
A__ : str =module.bias.data
bnb_module.requires_grad_(__snake_case )
setattr(__snake_case, __snake_case, __snake_case )
A__ : List[str] =True
if len(list(module.children() ) ) > 0:
A__ , A__ : List[Any] =_replace_with_bnb_layers(
__snake_case, __snake_case, __snake_case, __snake_case )
A__ : Dict =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
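# --- Illustrative sketch (editor's addition) ---
# The generic pattern used by the replacer above: recurse through
# named_children() and swap every nn.Linear not on the skip list for a
# replacement class (assumed to accept in_features, out_features, bias).
def _demo_replace_linears(module, replacement_cls, skip=()):
    import torch.nn as nn

    for name, child in module.named_children():
        if isinstance(child, nn.Linear) and name not in skip:
            setattr(
                module,
                name,
                replacement_cls(child.in_features, child.out_features, bias=child.bias is not None),
            )
        else:
            _demo_replace_linears(child, replacement_cls, skip)
    return module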
def __lowerCamelCase ( __snake_case : List[str] ) -> str:
"""simple docstring"""
with init_empty_weights():
A__ : int =deepcopy(__snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A__ : Optional[Any] =find_tied_parameters(__snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(__snake_case, __snake_case ):
A__ : List[str] =sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
else:
A__ : Any =sum(__snake_case, [] )
A__ : List[str] =len(__snake_case ) > 0
# Check if it is a base model
A__ : Dict =False
if hasattr(__snake_case, """base_model_prefix""" ):
A__ : Dict =not hasattr(__snake_case, model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A__ : List[Any] =list(model.named_children() )
A__ : List[str] =[list_modules[-1][0]]
# add last module together with tied weights
A__ : Dict =set(__snake_case ) - set(__snake_case )
A__ : Dict =list(set(__snake_case ) ) + list(__snake_case )
# remove ".weight" from the keys
A__ : Optional[int] =[""".weight""", """.bias"""]
A__ : str =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A__ : Union[str, Any] =name.replace(__snake_case, """""" )
filtered_module_names.append(__snake_case )
return filtered_module_names
def __lowerCamelCase ( __snake_case : List[Any] ) -> str:
"""simple docstring"""
for m in model.modules():
if isinstance(__snake_case, bnb.nn.Linearabit ):
return True
return False
def __lowerCamelCase ( __snake_case : nn.Module ) -> List[str]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[Any], __snake_case : Tuple, __snake_case : int, __snake_case : str, __snake_case : List[str], __snake_case : Tuple ) -> int:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(__snake_case, __snake_case, 0, dtype=__snake_case, value=__snake_case )
A__ : List[Any] =param_name
A__ : Tuple =model
if "." in tensor_name:
A__ : Optional[int] =tensor_name.split(""".""" )
for split in splits[:-1]:
A__ : Optional[int] =getattr(__snake_case, __snake_case )
if new_module is None:
raise ValueError(f"{module} has no attribute {split}." )
A__ : Tuple =new_module
A__ : List[str] =splits[-1]
# offload weights
A__ : Union[str, Any] =False
offload_weight(module._parameters[tensor_name], __snake_case, __snake_case, index=__snake_case )
if hasattr(module._parameters[tensor_name], """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB, param_name.replace("""weight""", """SCB""" ), __snake_case, index=__snake_case, )
else:
offload_weight(__snake_case, __snake_case, __snake_case, index=__snake_case )
offload_weight(__snake_case, param_name.replace("""weight""", """SCB""" ), __snake_case, index=__snake_case )
set_module_tensor_to_device(__snake_case, __snake_case, """meta""", dtype=__snake_case, value=torch.empty(*param.size() ) )
| 687 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 'bit'
__snake_case = ['preactivation', 'bottleneck']
__snake_case = ['SAME', 'VALID']
def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ : List[Any] =global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
A__ : List[Any] =num_channels
A__ : Tuple =embedding_size
A__ : Union[str, Any] =hidden_sizes
A__ : List[str] =depths
A__ : Optional[Any] =layer_type
A__ : int =hidden_act
A__ : int =global_padding
A__ : int =num_groups
A__ : str =drop_path_rate
A__ : str =embedding_dynamic_padding
A__ : Dict =output_stride
A__ : Optional[int] =width_factor
A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : list ) -> list:
"""simple docstring"""
A__ : str =False
while is_sorted is False: # keep making passes until a full pass completes with no swap
A__ : Dict =True
for i in range(0, len(__snake_case ) - 1, 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
A__ , A__ : Optional[Any] =input_list[i + 1], input_list[i]
# swapping if elements not in order
A__ : Optional[Any] =False
for i in range(1, len(__snake_case ) - 1, 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
A__ , A__ : Any =input_list[i + 1], input_list[i]
# swapping if elements not in order
A__ : Dict =False
return input_list
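# A compact, self-contained reference sketch of the same odd-even transposition idea,
# checked against a hand-sorted example (names here are illustrative):
def odd_even_reference(data: list) -> list:
    data = list(data)
    swapped = True
    while swapped:
        swapped = False
        for start in (0, 1):  # one even-indexed pass, then one odd-indexed pass
            for i in range(start, len(data) - 1, 2):
                if data[i] > data[i + 1]:
                    data[i], data[i + 1] = data[i + 1], data[i]
                    swapped = True
    return data

assert odd_even_reference([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]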
if __name__ == "__main__":
print('Enter list to be sorted')
__snake_case : Any = [int(x) for x in input().split()]
# inputing elements of the list in one line
__snake_case : Union[str, Any] = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 687 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__snake_case : List[str] = 5_0003
__snake_case : Dict = 5_0002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PLBartTokenizer
__snake_case = None
__snake_case = False
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )]
self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ )
A__ : Dict =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Tuple =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'uclanlp/plbart-python-en_XX'
__snake_case = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__snake_case = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__snake_case = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : Optional[int] ) -> str:
'''simple docstring'''
A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A__ : Optional[Any] =1
return cls
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
A__ : str =10
A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ : Tuple =tempfile.mkdtemp()
A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
A__ : Optional[int] =self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[Any] =targets["""input_ids"""]
A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
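# A minimal, self-contained sketch of the (P)LBart-style shift_tokens_right exercised
# by the tests above (an approximation of the library helper, not a verbatim copy):
# the last non-pad token of each row, the language code, is rotated to the front, so
# the decoder starts generating from the language id.
import torch

def shift_right(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    shifted = input_ids.clone()
    last_nonpad = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0:1] = input_ids.gather(1, last_nonpad)
    return shifted

ids = torch.tensor([[11, 12, 2, 50003, 1]])  # tokens, EOS, language code, pad
assert shift_right(ids, pad_token_id=1).tolist() == [[50003, 11, 12, 2, 50003]]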
| 687 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
A__ : Optional[int] =SwinConfig(
embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["""stage2""", """stage3""", """stage4"""], )
A__ : Union[str, Any] =DetaConfig(
backbone_config=__snake_case, num_queries=900, encoder_ffn_dim=2_048, decoder_ffn_dim=2_048, num_feature_levels=5, assign_first_stage=__snake_case, with_box_refine=__snake_case, two_stage=__snake_case, )
# set labels
A__ : Optional[int] ="""huggingface/label-files"""
if "o365" in model_name:
A__ : Tuple =366
A__ : Union[str, Any] ="""object365-id2label.json"""
else:
A__ : Any =91
A__ : Optional[Any] ="""coco-detection-id2label.json"""
A__ : Optional[int] =num_labels
A__ : Any =json.load(open(cached_download(hf_hub_url(__snake_case, __snake_case, repo_type="""dataset""" ) ), """r""" ) )
A__ : str ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : Union[str, Any] =idalabel
A__ : Tuple ={v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( __snake_case : Any ) -> List[str]:
"""simple docstring"""
A__ : int =[]
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Union[str, Any], __snake_case : Tuple ) -> Any:
"""simple docstring"""
A__ : List[str] =dct.pop(__snake_case )
A__ : int =val
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[int] ) -> str:
"""simple docstring"""
A__ : Tuple =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A__ : Tuple =num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A__ : Optional[int] =state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" )
A__ : Union[str, Any] =state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Any =in_proj_weight[:dim, :]
A__ : Any =in_proj_bias[: dim]
A__ : List[Any] =in_proj_weight[
dim : dim * 2, :
]
A__ : List[Any] =in_proj_bias[
dim : dim * 2
]
A__ : str =in_proj_weight[
-dim :, :
]
A__ : int =in_proj_bias[-dim :]
# fmt: on
def __lowerCamelCase ( __snake_case : str, __snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
A__ : Union[str, Any] =config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
A__ : Optional[Any] =state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
A__ : str =state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : str =in_proj_weight[:hidden_size, :]
A__ : Dict =in_proj_bias[:hidden_size]
A__ : List[Any] =in_proj_weight[
hidden_size : hidden_size * 2, :
]
A__ : str =in_proj_bias[hidden_size : hidden_size * 2]
A__ : Optional[int] =in_proj_weight[-hidden_size:, :]
A__ : List[Any] =in_proj_bias[-hidden_size:]
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Union[str, Any] =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : List[Any], __snake_case : int ) -> List[str]:
"""simple docstring"""
A__ : str =get_deta_config(__snake_case )
# load original state dict
if model_name == "deta-swin-large":
A__ : Optional[Any] =hf_hub_download(repo_id="""nielsr/deta-checkpoints""", filename="""adet_swin_ft.pth""" )
elif model_name == "deta-swin-large-o365":
A__ : Optional[Any] =hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""", filename="""deta_swin_pt_o365.pth""" )
else:
raise ValueError(f"Model name {model_name} not supported" )
A__ : Tuple =torch.load(__snake_case, map_location="""cpu""" )["""model"""]
# original state dict
for name, param in state_dict.items():
print(__snake_case, param.shape )
# rename keys
A__ : Tuple =create_rename_keys(__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_swin_q_k_v(__snake_case, config.backbone_config )
read_in_decoder_q_k_v(__snake_case, __snake_case )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
A__ : Union[str, Any] =state_dict.pop(__snake_case )
A__ : Tuple =val
if "input_proj" in key:
A__ : Optional[Any] =state_dict.pop(__snake_case )
A__ : int =val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
A__ : int =state_dict.pop(__snake_case )
A__ : List[str] =val
# finally, create HuggingFace model and load state dict
A__ : Any =DetaForObjectDetection(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
A__ : str ="""cuda""" if torch.cuda.is_available() else """cpu"""
model.to(__snake_case )
# load image processor
A__ : int =DetaImageProcessor(format="""coco_detection""" )
# verify our conversion on image
A__ : Tuple =prepare_img()
A__ : Any =processor(images=__snake_case, return_tensors="""pt""" )
A__ : Optional[Any] =encoding["""pixel_values"""]
A__ : Optional[int] =model(pixel_values.to(__snake_case ) )
# verify logits
print("""Logits:""", outputs.logits[0, :3, :3] )
print("""Boxes:""", outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
A__ : Dict =torch.tensor(
[[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
A__ : Optional[int] =torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
elif model_name == "deta-swin-large-o365":
A__ : Union[str, Any] =torch.tensor(
[[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
A__ : Dict =torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(__snake_case ), atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(__snake_case ), atol=1E-4 )
print("""Everything ok!""" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
# Push to hub
if push_to_hub:
print("""Pushing model and processor to hub...""" )
model.push_to_hub(f"jozhang97/{model_name}" )
processor.push_to_hub(f"jozhang97/{model_name}" )
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__snake_case : str = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
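# A self-contained illustration of the fused-qkv split that read_in_swin_q_k_v and
# read_in_decoder_q_k_v perform above: the checkpoint stores a single (3*dim, dim)
# input projection, and slicing it row-wise yields the query, key and value weights.
import torch

dim = 4
in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = in_proj_weight[:dim], in_proj_weight[dim : 2 * dim], in_proj_weight[-dim:]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)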
| 687 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger """
A__ : Tuple =torch.manual_seed(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =generator.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict ="""A painting of a squirrel eating a burger """
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[str] =pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
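# A self-contained illustration of the seeding pattern used in the tests above:
# torch.manual_seed(0) re-seeds and returns the default generator, so two pipeline
# calls seeded this way start from identical noise and can be compared exactly.
import torch

first = torch.randn(3, generator=torch.manual_seed(0))
second = torch.randn(3, generator=torch.manual_seed(0))
assert torch.equal(first, second)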
| 687 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( __snake_case : str, __snake_case : str ) -> str | Literal[False]:
"""simple docstring"""
A__ : Any =list(__snake_case )
A__ : Dict =list(__snake_case )
A__ : List[str] =0
for i in range(len(__snake_case ) ):
if lista[i] != lista[i]:
count += 1
A__ : List[str] ="""_"""
if count > 1:
return False
else:
return "".join(__snake_case )
def __lowerCamelCase ( __snake_case : list[str] ) -> list[str]:
"""simple docstring"""
A__ : str =[]
while True:
A__ : Dict =["""$"""] * len(__snake_case )
A__ : List[str] =[]
for i in range(len(__snake_case ) ):
for j in range(i + 1, len(__snake_case ) ):
A__ : List[str] =compare_string(binary[i], binary[j] )
if k is False:
A__ : str ="""*"""
A__ : Union[str, Any] ="""*"""
temp.append("""X""" )
for i in range(len(__snake_case ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__snake_case ) == 0:
return pi
A__ : Union[str, Any] =list(set(__snake_case ) )
def __lowerCamelCase ( __snake_case : int, __snake_case : Sequence[float] ) -> list[str]:
"""simple docstring"""
A__ : List[Any] =[]
for minterm in minterms:
A__ : Optional[Any] =""""""
for _ in range(__snake_case ):
A__ : str =str(minterm % 2 ) + string
minterm //= 2
temp.append(__snake_case )
return temp
def __lowerCamelCase ( __snake_case : str, __snake_case : str, __snake_case : int ) -> bool:
"""simple docstring"""
A__ : Optional[int] =list(__snake_case )
A__ : Union[str, Any] =list(__snake_case )
A__ : Optional[int] =0
for i in range(len(__snake_case ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( __snake_case : list[list[int]], __snake_case : list[str] ) -> list[str]:
"""simple docstring"""
A__ : List[Any] =[]
A__ : Any =[0] * len(__snake_case )
for i in range(len(chart[0] ) ):
A__ : Union[str, Any] =0
A__ : str =-1
for j in range(len(__snake_case ) ):
if chart[j][i] == 1:
count += 1
A__ : int =j
if count == 1:
A__ : Tuple =1
for i in range(len(__snake_case ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__snake_case ) ):
A__ : Union[str, Any] =0
temp.append(prime_implicants[i] )
while True:
A__ : Optional[int] =0
A__ : List[Any] =-1
A__ : Dict =0
for i in range(len(__snake_case ) ):
A__ : Optional[int] =chart[i].count(1 )
if count_n > max_n:
A__ : Tuple =count_n
A__ : Dict =i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__snake_case ) ):
A__ : List[str] =0
def __lowerCamelCase ( __snake_case : list[str], __snake_case : list[str] ) -> list[list[int]]:
"""simple docstring"""
A__ : Any =[[0 for x in range(len(__snake_case ) )] for x in range(len(__snake_case ) )]
for i in range(len(__snake_case ) ):
A__ : Optional[int] =prime_implicants[i].count("""_""" )
for j in range(len(__snake_case ) ):
if is_for_table(prime_implicants[i], binary[j], __snake_case ):
A__ : List[Any] =1
return chart
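# A self-contained illustration of the merge rule that compare_string/check implement
# above (the helper name is illustrative): two implicants combine only when they
# differ in exactly one bit, and the differing bit becomes "_" in the result.
def merge_implicants(a: str, b: str):
    diff = [i for i, (x, y) in enumerate(zip(a, b)) if x != y]
    if len(diff) != 1:
        return None
    i = diff[0]
    return a[:i] + "_" + a[i + 1 :]

assert merge_implicants("0010", "0110") == "0_10"
assert merge_implicants("0110", "1101") is None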
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
A__ : Optional[Any] =int(input("""Enter the no. of variables\n""" ) )
A__ : Optional[int] =[
float(__snake_case )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
A__ : Dict =decimal_to_binary(__snake_case, __snake_case )
A__ : Optional[int] =check(__snake_case )
print("""Prime Implicants are:""" )
print(__snake_case )
A__ : Dict =prime_implicant_chart(__snake_case, __snake_case )
A__ : Union[str, Any] =selection(__snake_case, __snake_case )
print("""Essential Prime Implicants are:""" )
print(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 687 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
A__ : Optional[Any] =Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels
A__ : Union[str, Any] =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ )
A__ : Tuple =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
# pass init params to Decoder
A__ : Optional[Any] =Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , )
@apply_forward_hook
def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
A__ : Dict =self.encoder(lowerCAmelCase_ )
A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase_ )
@apply_forward_hook
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
# also go through quantization layer
if not force_not_quantize:
A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ )
else:
A__ : List[str] =h
A__ : Dict =self.post_quant_conv(lowerCAmelCase_ )
A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
A__ : Optional[int] =sample
A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents
A__ : Tuple =self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
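# A simplified, self-contained sketch of what self.quantize does above (the real
# diffusers VectorQuantizer also returns a commitment loss and supports index
# remapping): nearest-codebook lookup with a straight-through gradient.
import torch

def vq_straight_through(z: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
    distances = torch.cdist(z, codebook)  # (N, K) pairwise L2 distances
    indices = distances.argmin(dim=1)     # index of the nearest code per vector
    z_q = codebook[indices]               # (N, D) quantized vectors
    return z + (z_q - z).detach()         # gradients flow straight through to z

codebook = torch.randn(16, 8)
quantized = vq_straight_through(torch.randn(4, 8), codebook)
assert quantized.shape == (4, 8)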
| 687 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'naver-clova-ix/donut-base-finetuned-docvqa'
__snake_case = (
'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
__snake_case = 'document_qa'
__snake_case = AutoProcessor
__snake_case = VisionEncoderDecoderModel
__snake_case = ['image', 'text']
__snake_case = ['text']
def __init__( self : str , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : str ) -> Tuple:
'''simple docstring'''
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : "Image" , lowerCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
A__ : Tuple ="""<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
A__ : Tuple =task_prompt.replace("""{user_input}""" , lowerCAmelCase_ )
A__ : int =self.pre_processor.tokenizer(
lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors="""pt""" ).input_ids
A__ : List[str] =self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def lowercase__ ( self : int , lowerCAmelCase_ : List[str] ) -> Optional[int]:
'''simple docstring'''
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=lowerCAmelCase_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=lowerCAmelCase_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=lowerCAmelCase_ , ).sequences
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Dict ) -> int:
'''simple docstring'''
A__ : Tuple =self.pre_processor.batch_decode(lowerCAmelCase_ )[0]
A__ : Any =sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
A__ : Optional[Any] =sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
A__ : int =re.sub(R"""<.*?>""" , """""" , lowerCAmelCase_ , count=1 ).strip() # remove first task start token
A__ : str =self.pre_processor.tokenajson(lowerCAmelCase_ )
return sequence["answer"]
| 687 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Tuple = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
__snake_case : str = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
__snake_case : List[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =set()
A__ : Optional[int] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ : str =char
A__ : List[Any] =set(__snake_case )
return pairs
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : int =vocab_file
A__ : Any =merges_file
A__ : Union[str, Any] ={}
A__ : Optional[int] =0
A__ : List[Any] =1
A__ : Tuple =2
A__ : Dict =3
self.add_from_file(lowerCAmelCase_ )
A__ : List[str] ={v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
A__ : str =merges_handle.read().split("""\n""" )[:-1]
A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges]
A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Dict ={}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : Dict =[self.cls_token_id]
A__ : Union[str, Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[self.sep_token_id]
A__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ : int =tuple(lowerCAmelCase_ )
A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A__ : Tuple =get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : Tuple =bigram
A__ : Optional[int] =[]
A__ : Tuple =0
while i < len(lowerCAmelCase_ ):
try:
A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Union[str, Any] =j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Dict =tuple(lowerCAmelCase_ )
A__ : Dict =new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
A__ : str =get_pairs(lowerCAmelCase_ )
A__ : Dict ="""@@ """.join(lowerCAmelCase_ )
A__ : Tuple =word[:-4]
A__ : Any =word
return word
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
A__ : int =[]
A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Optional[Any] =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.merges_file , lowerCAmelCase_ )
return out_vocab_file, out_merge_file
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
A__ : Union[str, Any] =f.readlines()
for lineTmp in lines:
A__ : List[Any] =lineTmp.strip()
A__ : Dict =line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
A__ : Tuple =line[:idx]
A__ : Tuple =len(self.encoder )
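# A self-contained illustration of the adjacent-pair bookkeeping behind get_pairs and
# bpe above (the helper name is illustrative): merge candidates are the pairs of
# neighbouring symbols, and the lowest-ranked pair is merged first.
def symbol_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

assert symbol_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}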
| 687 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Dict = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'openai-gpt'
__snake_case = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Any , lowerCAmelCase_ : Optional[int]=4_04_78 , lowerCAmelCase_ : int=5_12 , lowerCAmelCase_ : Any=7_68 , lowerCAmelCase_ : List[Any]=12 , lowerCAmelCase_ : List[Any]=12 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Tuple=1e-5 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : List[str]="cls_index" , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Any=0.1 , **lowerCAmelCase_ : Tuple , ) -> int:
'''simple docstring'''
A__ : Optional[Any] =vocab_size
A__ : Dict =n_positions
A__ : List[Any] =n_embd
A__ : Dict =n_layer
A__ : Union[str, Any] =n_head
A__ : Dict =afn
A__ : List[str] =resid_pdrop
A__ : Optional[Any] =embd_pdrop
A__ : Optional[Any] =attn_pdrop
A__ : Tuple =layer_norm_epsilon
A__ : Optional[Any] =initializer_range
A__ : Union[str, Any] =summary_type
A__ : Tuple =summary_use_proj
A__ : Tuple =summary_activation
A__ : Optional[int] =summary_first_dropout
A__ : Optional[Any] =summary_proj_to_labels
super().__init__(**lowerCAmelCase_ )
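# Example usage (assuming the standard transformers API for this config class):
#
#     from transformers import OpenAIGPTConfig, OpenAIGPTModel
#
#     config = OpenAIGPTConfig(n_layer=6, n_head=6, n_embd=384)  # a smaller variant
#     model = OpenAIGPTModel(config)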
| 687 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int:
"""simple docstring"""
A__ : Union[str, Any] =nn.functional.normalize(__snake_case )
A__ : Optional[Any] =nn.functional.normalize(__snake_case )
return torch.mm(__snake_case, normalized_text_embeds.t() )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = CLIPConfig
__snake_case = ['CLIPEncoderLayer']
def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase_ )
A__ : str =CLIPVisionModel(config.vision_config )
A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ )
A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ )
A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ )
@torch.no_grad()
def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any:
'''simple docstring'''
A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : Any =self.visual_projection(lowerCAmelCase_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy()
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy()
A__ : List[str] =[]
A__ : Optional[int] =image_embeds.shape[0]
for i in range(lowerCAmelCase_ ):
A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : List[Any] =0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ : Optional[Any] =special_cos_dist[i][concept_idx]
A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item()
A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
A__ : Dict =0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ : Optional[int] =cos_dist[i][concept_idx]
A__ : List[str] =self.concept_embeds_weights[concept_idx].item()
A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase_ )
result.append(lowerCAmelCase_ )
A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
A__ : List[Any] =self.visual_projection(lowerCAmelCase_ )
A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds )
A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ : Dict =0.0
A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 )
A__ : Tuple =special_care * 0.01
A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
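# A self-contained check of the cosine_distance pattern used above: row-normalising
# both matrices and taking a matrix product yields all pairwise cosine similarities.
import torch
import torch.nn.functional as F

a, b = torch.randn(2, 4), torch.randn(3, 4)
sims = F.normalize(a) @ F.normalize(b).t()  # (2, 3) pairwise cosine similarities
assert torch.allclose(sims[0, 0], F.cosine_similarity(a[0:1], b[0:1])[0])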
| 687 | 1 |
'''simple docstring'''
__snake_case : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609344,
"knot": 1.852,
}
__snake_case : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277777778,
"mph": 0.621371192,
"knot": 0.539956803,
}
def __lowerCamelCase ( __snake_case : float, __snake_case : str, __snake_case : str ) -> float:
"""simple docstring"""
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
A__ : Union[str, Any] =(
f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
f"Valid values are: {', '.join(__snake_case )}"
)
raise ValueError(__snake_case )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3 )
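# Worked example using the tables above: converting 100 km/h to m/s multiplies by
# speed_chart["km/h"] (1.0) and then by speed_chart_inverse["m/s"] (0.277777778),
# rounded to three decimals.
assert round(100 * 1.0 * 0.277777778, 3) == 27.778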
if __name__ == "__main__":
import doctest
doctest.testmod()
| 687 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =[]
for part_id in partition_order:
A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : str =spark.range(100 ).repartition(1 )
A__ : List[str] =Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Tuple =spark.range(10 ).repartition(2 )
A__ : List[str] =[1, 0]
A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions.
A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(10 ).repartition(1 )
A__ : List[str] =SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
A__ : Tuple =lambda __snake_case : x.reverse()
A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] )
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Any =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : List[str] =spark.range(100 ).repartition(1 )
A__ : List[Any] =Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
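# --- Illustrative sketch (hypothetical helper, not from the module under test) of
# the round-robin sharding the shard_data_sources tests above rely on: worker i of
# n takes partitions i, i+n, i+2n, ...
def shard_partition_order(num_partitions: int, worker_id: int, num_workers: int) -> list[int]:
    return list(range(worker_id, num_partitions, num_workers))

assert shard_partition_order(4, 0, 2) == [0, 2]  # the "Partitions 0 and 2" case
assert shard_partition_order(4, 1, 2) == [1, 3]  # the "Partitions 1 and 3" case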
| 687 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = ShapEImgaImgPipeline
__snake_case = ['image']
__snake_case = ['image']
__snake_case = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
__snake_case = False
@property
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return 32
@property
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
return 32
@property
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
return 8
@property
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
A__ : int =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
A__ : List[Any] =CLIPVisionModel(lowerCAmelCase_ )
return model
@property
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
A__ : List[Any] =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ , do_resize=lowerCAmelCase_ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A__ : Optional[Any] ={
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
A__ : Union[str, Any] =PriorTransformer(**lowerCAmelCase_ )
return model
@property
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A__ : Optional[int] ={
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
A__ : str =ShapERenderer(**lowerCAmelCase_ )
return model
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
A__ : str =self.dummy_prior
A__ : Union[str, Any] =self.dummy_image_encoder
A__ : Tuple =self.dummy_image_processor
A__ : List[Any] =self.dummy_renderer
A__ : Tuple =HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=lowerCAmelCase_ , clip_sample=lowerCAmelCase_ , clip_sample_range=1.0 , )
A__ : Optional[int] ={
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowercase__ ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict=0 ) -> List[str]:
'''simple docstring'''
A__ : List[str] =floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
if str(lowerCAmelCase_ ).startswith("""mps""" ):
A__ : Dict =torch.manual_seed(lowerCAmelCase_ )
else:
A__ : List[Any] =torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
A__ : Tuple ={
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] ="""cpu"""
A__ : Dict =self.get_dummy_components()
A__ : Tuple =self.pipeline_class(**lowerCAmelCase_ )
A__ : List[Any] =pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Optional[Any] =pipe(**self.get_dummy_inputs(lowerCAmelCase_ ) )
A__ : int =output.images[0]
A__ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
A__ : Tuple =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
A__ : Optional[int] =torch_device == """cpu"""
A__ : List[str] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase_ , relax_max_difference=lowerCAmelCase_ , )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : List[str] =self.get_dummy_components()
A__ : Any =self.pipeline_class(**lowerCAmelCase_ )
A__ : Optional[int] =pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict =1
A__ : str =2
A__ : str =self.get_dummy_inputs(lowerCAmelCase_ )
for key in inputs.keys():
if key in self.batch_params:
A__ : Tuple =batch_size * [inputs[key]]
A__ : List[str] =pipe(**lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : str =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
A__ : List[Any] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
A__ : Dict =ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
A__ : int =pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Tuple =torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
A__ : Optional[Any] =pipe(
lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
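# --- Illustrative sketch of the device-aware seeding used in get_dummy_inputs above
# (assumption: torch.Generator does not accept an MPS device, hence the fallback to
# the global torch.manual_seed, which returns the default CPU generator):
import torch

def make_generator_sketch(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)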
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
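# --- Stripped-down sketch of the _LazyModule pattern above (illustrative only, not
# the real implementation): submodules are imported on first attribute access rather
# than at package import time.
import importlib
from types import ModuleType

class LazyModuleSketch(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)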
| 687 | 1 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""", ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""", ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""", [None, """v2"""] )
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : int, __snake_case : Tuple ) -> Dict:
"""simple docstring"""
A__ : Optional[Any] =hf_hub_url(repo_id=__snake_case, path=__snake_case, revision=__snake_case )
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(__snake_case )}"
| 687 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
super().__init__()
A__ : Union[str, Any] =module
A__ : Union[str, Any] =nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , )
A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'bigscience/bloom-1b7'
# Constant values
__snake_case = 2.109659552692574
__snake_case = 'Hello my name is'
__snake_case = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__snake_case = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
            # Tries with a cast to `float`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
            # Tries with a cast to `half`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt2-xl'
__snake_case = 3.3191854854152187
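# --- Condensed sketch of the adapter-training recipe tested above (generic PyTorch,
# not tied to bitsandbytes): freeze the base weights, cast 1-D parameters (layer
# norms, biases) to fp32 for numerical stability, then train only the adapters.
import torch

def freeze_for_adapter_training(model: torch.nn.Module) -> None:
    for param in model.parameters():
        param.requires_grad = False            # base model stays frozen
        if param.ndim == 1:
            param.data = param.data.to(torch.float32)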
| 687 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : str, __snake_case : int ) -> list:
"""simple docstring"""
A__ : int =word.split()
def justify(__snake_case : list, __snake_case : int, __snake_case : int ) -> str:
A__ : Tuple =max_width - width
A__ : Any =len(__snake_case )
if len(__snake_case ) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count for the remainder of the line
return line[0] + " " * overall_spaces_count
else:
A__ : List[Any] =words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after the word at line[i]
A__ : List[str] =spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
A__ : int =(
overall_spaces_count % spaces_to_insert_between_words
)
            # distribute spaces via round robin to the leftmost words
for i in range(__snake_case ):
num_spaces_between_words_list[i] += 1
A__ : Dict =[]
for i in range(__snake_case ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__snake_case )
A__ : int =[]
A__ : list[str] =[]
A__ : List[str] =0
for word in words:
if width + len(__snake_case ) + len(__snake_case ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__snake_case )
width += len(__snake_case )
else:
# justify the line and add it to result
answer.append(justify(__snake_case, __snake_case, __snake_case ) )
# reset new line and new width
A__ , A__ : Any =[word], len(__snake_case )
A__ : Union[str, Any] =max_width - width - len(__snake_case )
answer.append(""" """.join(__snake_case ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
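# --- Minimal sketch of the round-robin space distribution inside `justify` above
# (standalone, hypothetical helper). For max_width=16, "This is an example of text
# justification." justifies to ["This    is    an", "example  of text", "justification.  "].
def distribute_spaces(total_spaces: int, gaps: int) -> list[int]:
    base, extra = divmod(total_spaces, gaps)
    # the leftmost `extra` gaps receive one extra space
    return [base + 1 if i < extra else base for i in range(gaps)]

assert distribute_spaces(8, 2) == [4, 4]  # "This    is    an"
assert distribute_spaces(3, 2) == [2, 1]  # "example  of text"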
| 687 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> None:
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
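# --- Generic sketch of the deprecation-shim pattern above: the old class subclasses
# its replacement and only adds a warning (the warning category is obfuscated above;
# FutureWarning is an assumption). All names below are hypothetical.
import warnings

class NewAPI:
    pass

class OldAPI(NewAPI):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldAPI is deprecated; use NewAPI instead.", FutureWarning)
        super().__init__(*args, **kwargs)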
| 687 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : str = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
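# --- Self-contained sketch of the optional-backend registration repeated above
# (hypothetical probe/registry names): symbols are registered only when the backend
# is importable, so importing the package never fails on a missing extra.
import importlib.util

_sketch_import_structure: dict = {}

def register_backend(probe: str, submodule: str, symbols: list) -> None:
    if importlib.util.find_spec(probe) is not None:
        _sketch_import_structure[submodule] = symbols

register_backend("torch", "modeling_distilbert", ["DistilBertModel"])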
| 687 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple:
'''simple docstring'''
A__ : Tuple =parent
A__ : Any =batch_size
A__ : List[str] =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_lengths
A__ : int =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : Optional[Any] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : List[Any] =causal
A__ : str =asm
A__ : Tuple =n_langs
A__ : Dict =vocab_size
A__ : Optional[Any] =n_special
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Optional[int] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : Any =num_labels
A__ : str =num_choices
A__ : Optional[int] =summary_type
A__ : int =use_proj
A__ : Tuple =scope
A__ : Union[str, Any] =bos_token_id
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Tuple =None
if self.use_input_lengths:
A__ : Tuple =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : Optional[Any] =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Any =None
A__ : Tuple =None
A__ : Optional[Any] =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float()
A__ : str =ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
A__ : List[Any] =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Tuple =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
A__ : Optional[Any] =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((A__) , ) : List[Any] =result_with_labels.to_tuple()
A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((A__) , ) : Tuple =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.num_choices
A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : Dict =self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : Optional[int] =config_and_inputs
A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__snake_case = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with a slower tokenizer
return True
return False
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int:
'''simple docstring'''
A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Dict =XLMModelTester(self )
A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : Tuple =min_length + idx + 1
A__ : Tuple =min_length + idx + 1
A__ : Dict =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : str =min_length + idx + 1
A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , )
pass
@slow
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCAmelCase_ )
A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
A__ : Optional[Any] =[
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
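# --- Sketch of the shape asserted per decoding step in the generate checks above:
# after appending the PAD dummy token at step `idx`, both target and source lengths
# equal min_length + idx + 1 for this model (no key/value caching assumed).
def expected_attention_shape(batch_size, num_heads, min_length, idx, num_beam_groups=1):
    seq_len = min_length + idx + 1
    return (batch_size * num_beam_groups, num_heads, seq_len, seq_len)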
| 687 | 1 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__snake_case : int = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class lowerCamelCase ( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : str = " " ) -> int:
'''simple docstring'''
A__ : Dict =sentence_delimiter
def lowercase__ ( self : Dict , lowerCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
return list(lowerCAmelCase_ )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
A__ : Dict =[]
for sent_idx, sentence in enumerate(lowerCAmelCase_ ):
chars.extend(self.process_string(lowerCAmelCase_ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCAmelCase_ ) - 1:
chars.append(self.sentence_delimiter )
return chars
__snake_case : List[Any] = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__snake_case : Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__snake_case : str = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__snake_case : Union[str, Any] = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system, with a CER of 0 being a perfect score.\n'
__snake_case : str = '\nComputes the CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def lowercase__ ( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
lowerCAmelCase_ , lowerCAmelCase_ , truth_transform=lowerCAmelCase_ , hypothesis_transform=lowerCAmelCase_ , )["wer"]
A__ : List[Any] =0
A__ : Any =0
for prediction, reference in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Optional[Any] =jiwer.compute_measures(
lowerCAmelCase_ , lowerCAmelCase_ , truth_transform=lowerCAmelCase_ , hypothesis_transform=lowerCAmelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
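# --- Direct sketch of the accumulation above: CER = (S + D + I) / (S + D + C),
# summed over all prediction/reference pairs before dividing.
def character_error_rate(substitutions: int, deletions: int, insertions: int, hits: int) -> float:
    return (substitutions + deletions + insertions) / (substitutions + deletions + hits)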
| 687 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCamelCase ( __snake_case : int ) -> Optional[int]:
"""simple docstring"""
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
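# --- Standalone sketch of the decay schedule implemented by the EMA class below
# (hypothetical name; `step` here is assumed to be already offset by
# update_after_step): with warmup, decay ramps as 1 - (1 + step/inv_gamma)**(-power);
# otherwise it follows (1 + step)/(10 + step); the result is clamped to
# [min_decay, decay].
def ema_decay_sketch(step: int, decay: float = 0.9999, min_decay: float = 0.0,
                     use_ema_warmup: bool = False, inv_gamma: float = 1.0,
                     power: float = 2 / 3) -> float:
    if step <= 0:
        return 0.0
    value = 1 - (1 + step / inv_gamma) ** -power if use_ema_warmup else (1 + step) / (10 + step)
    return max(min(value, decay), min_decay)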
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Optional[Any] =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : List[str] =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A__ : int =True
if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None:
A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Union[str, Any] =kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
A__ : Optional[Any] =kwargs["""min_value"""]
A__ : Any =list(lowerCAmelCase_ )
A__ : int =[p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCAmelCase_ ) is not None:
A__ : List[str] ="""The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs["""device"""] )
A__ : Optional[int] =None
A__ : Any =decay
A__ : List[Any] =min_decay
A__ : Optional[int] =update_after_step
A__ : List[str] =use_ema_warmup
A__ : str =inv_gamma
A__ : Union[str, Any] =power
A__ : str =0
A__ : str =None # set in `step()`
A__ : List[str] =model_cls
A__ : Optional[int] =model_config
@classmethod
def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel":
'''simple docstring'''
A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase_ )
return ema_model
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
A__ : Optional[int] =self.model_cls.from_config(self.model_config )
A__ : Optional[Any] =self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power
else:
A__ : Union[str, Any] =(1 + step) / (10 + step)
A__ : str =min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
A__ : int =max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
A__ : Any =(
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
A__ : Optional[int] =parameters.parameters()
A__ : Dict =list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ : Any =self.get_decay(self.optimization_step )
A__ : Optional[int] =decay
A__ : List[str] =1 - decay
A__ : str =contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : Optional[Any] =list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None:
'''simple docstring'''
A__ : str =[
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def lowercase__ ( self : Optional[Any] ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
A__ : List[str] =[param.detach().cpu().clone() for param in parameters]
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ : List[str] =None
def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None:
'''simple docstring'''
A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ )
A__ : List[Any] =state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError("""Invalid min_decay""" )
A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError("""Invalid optimization_step""" )
A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError("""Invalid update_after_step""" )
A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
A__ : Tuple =state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ )
if shadow_params is not None:
A__ : List[str] =shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 687 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__snake_case : Any = datasets.utils.logging.get_logger(__name__)
__snake_case : str = ['names', 'prefix']
__snake_case : Optional[int] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
__snake_case : Optional[Any] = ['encoding_errors', 'on_bad_lines']
__snake_case : Union[str, Any] = ['date_format']
@dataclass
class lowerCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__snake_case = ","
__snake_case = None
__snake_case = "infer"
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = True
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = False
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = True
__snake_case = True
__snake_case = False
__snake_case = True
__snake_case = None
__snake_case = "."
__snake_case = None
__snake_case = '"'
__snake_case = 0
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = True
__snake_case = True
__snake_case = 0
__snake_case = True
__snake_case = False
__snake_case = None
__snake_case = 1_0000
__snake_case = None
__snake_case = "strict"
__snake_case = "error"
__snake_case = None
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.delimiter is not None:
A__ : str =self.delimiter
if self.column_names is not None:
A__ : int =self.column_names
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
A__ : Union[str, Any] ={
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase_ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class lowerCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__snake_case = CsvConfig
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
A__ : Union[str, Any] =dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase_ , (str, list, tuple) ):
A__ : Any =data_files
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : List[Any] =[files]
A__ : int =[dl_manager.iter_files(lowerCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A__ : Optional[int] =[]
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Tuple =[files]
A__ : Optional[Any] =[dl_manager.iter_files(lowerCAmelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase_ , gen_kwargs={"""files""": files} ) )
return splits
def lowercase__ ( self : Dict , lowerCAmelCase_ : pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
A__ : Dict =self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase_ ) for feature in self.config.features.values() ):
# cheaper cast
A__ : int =pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
A__ : List[Any] =table_cast(lowerCAmelCase_ , lowerCAmelCase_ )
return pa_table
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple =self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
A__ : str =(
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase_ ) ):
A__ : List[str] =pd.read_csv(lowerCAmelCase_ , iterator=lowerCAmelCase_ , dtype=lowerCAmelCase_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase_ ):
A__ : Dict =pa.Table.from_pandas(lowerCAmelCase_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase_ )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(lowerCAmelCase_ )}: {e}" )
raise
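# Hedged usage sketch: upstream, this builder is what backs
# `load_dataset("csv", ...)`, and each `CsvConfig` field above maps onto a
# `pandas.read_csv` keyword via `pd_read_csv_kwargs`:
#
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
#     print(ds["train"][0])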
| 687 |
'''simple docstring'''
from __future__ import annotations
import requests
__snake_case : Union[str, Any] = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict:
"""simple docstring"""
A__ : Union[str, Any] =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}"
raise ValueError(__snake_case )
A__ : Tuple =requests.get(
f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, )
if response.status_code == 429:
raise requests.HTTPError
A__ : Tuple =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
A__ : Tuple ={}
for id_ in range(__snake_case ):
A__ : List[Any] ={
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
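# Hedged retry sketch (hypothetical helper, not part of the module): since a
# 429 response raises `requests.HTTPError` above, a caller can back off and
# retry:
#
#     import time
#     def get_with_retry(subreddit: str, attempts: int = 3) -> dict:
#         for attempt in range(attempts):
#             try:
#                 return get_subreddit_data(subreddit)
#             except requests.HTTPError:
#                 time.sleep(2 ** attempt)  # exponential backoff
#         raise requests.HTTPError("rate limited after retries")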
| 687 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = MgpstrTokenizer
__snake_case = False
__snake_case = {}
__snake_case = False
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# fmt: off
A__ : Union[str, Any] =["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ : Any =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
def lowercase__ ( self : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] ) -> Any:
'''simple docstring'''
A__ : int ="""tester"""
A__ : Optional[Any] ="""tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
A__ : int ="""[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
A__ : Tuple =tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
A__ : Optional[int] =tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
self.assertTrue(special_token not in decoded )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Dict =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
A__ , A__ : List[Any] =self.get_input_output_texts(lowerCAmelCase_ )
A__ : Dict =tokenizer.tokenize(lowerCAmelCase_ )
A__ : Tuple =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
A__ : Union[str, Any] =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Tuple =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertNotEqual(len(lowerCAmelCase_ ) , 0 )
A__ : Dict =tokenizer.decode(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(text_a.replace(""" """ , """""" ) , lowerCAmelCase_ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
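# Hedged note: these tests plug into the shared `TokenizerTesterMixin`; a
# typical local run from a `transformers` dev checkout would look like the
# following (the test file path is an assumption):
#
#     python -m pytest tests/models/mgp_str/test_tokenization_mgp_str.py -q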
| 687 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
A__ : Optional[Any] =parser.parse_args()
return args
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
if args.tpu_name:
A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__snake_case )
tf.tpu.experimental.initialize_tpu_system(__snake_case )
return tpu
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Any =0
for file in file_list:
A__ : Optional[int] =file.split("""/""" )[-1]
A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 )
A__ : str =int(__snake_case )
num_samples += sample_count
return num_samples
def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] =count_samples(__snake_case )
A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case )
if shuffle:
A__ : Optional[int] =dataset.shuffle(len(__snake_case ) )
A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) )
A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case )
if shuffle:
assert shuffle_buffer_size is not None
A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size )
A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case )
A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case )
A__ : Tuple =dataset.prefetch(__snake_case )
return dataset
def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not args.no_tpu:
A__ : Dict =initialize_tpu(__snake_case )
A__ : int =tf.distribute.TPUStrategy(__snake_case )
else:
A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer )
A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
A__ : Optional[Any] =count_samples(__snake_case )
A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A__ : str =steps_per_epoch * args.num_epochs
with strategy.scope():
A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A__ , A__ : Optional[Any] =create_optimizer(
num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__snake_case, metrics=["""accuracy"""] )
def decode_fn(__snake_case : Tuple ):
A__ : Dict ={
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__snake_case, __snake_case )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A__ : List[Any] =DataCollatorForLanguageModeling(
tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" )
def mask_with_collator(__snake_case : Optional[int] ):
# TF really needs an isin() function
A__ : Union[str, Any] =(
~tf.cast(batch["""attention_mask"""], tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
A__ , A__ : List[str] =data_collator.tf_mask_tokens(
batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, )
return batch
A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, )
A__ : List[str] =prepare_dataset(
__snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, )
A__ : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) )
model.fit(
__snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__snake_case : str = parse_args()
main(args)
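# Hedged invocation sketch (the script file name is a placeholder; the flags
# match the parse_args() definition above, and the gs:// paths are
# assumptions):
#
#     python run_mlm_tpu.py \
#         --tokenizer unigram-tokenizer-wikitext \
#         --pretrained_model_config roberta-base \
#         --train_dataset gs://my-bucket/train \
#         --eval_dataset gs://my-bucket/eval \
#         --output_dir ./mlm-checkpoints \
#         --bfloat16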
| 687 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : str = logging.get_logger()
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = 42
__snake_case = field(default_factory=lowercase_ )
__snake_case = field(default_factory=lowercase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tensor , lowerCAmelCase_ : Tensor ) -> str:
'''simple docstring'''
        A__ : str =len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase_ , nn.Conv2d ) or isinstance(lowerCAmelCase_ , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(lowerCAmelCase_ )
def __call__( self : List[str] , lowerCAmelCase_ : Tensor ) -> Optional[Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase_ )
[x.remove() for x in self.handles]
return self
@property
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = 42
__snake_case = 42
__snake_case = 1
__snake_case = field(default_factory=lowercase_ )
__snake_case = field(default_factory=lowercase_ )
__snake_case = True
def __call__( self : int , lowerCAmelCase_ : Tensor ) -> Any:
'''simple docstring'''
A__ : Any =Tracker(self.dest )(lowerCAmelCase_ ).parametrized
A__ : Tuple =Tracker(self.src )(lowerCAmelCase_ ).parametrized
A__ : Any =list(filter(lambda lowerCAmelCase_ : type(lowerCAmelCase_ ) not in self.src_skip , lowerCAmelCase_ ) )
A__ : List[str] =list(filter(lambda lowerCAmelCase_ : type(lowerCAmelCase_ ) not in self.dest_skip , lowerCAmelCase_ ) )
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ) and self.raise_if_mismatch:
raise Exception(
f"Numbers of operations are different. Source module has {len(lowerCAmelCase_ )} operations while"
f" destination module has {len(lowerCAmelCase_ )}." )
for dest_m, src_m in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"Transfered from={src_m} to={dest_m}" )
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module ) -> Dict:
'''simple docstring'''
super().__init__()
A__ : List[Tuple[str, nn.Module]] =[]
# - get the stem
feature_blocks.append(("""conv1""", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("""block""" ), f"Unexpected layer name {k}"
A__ : List[str] =len(lowerCAmelCase_ ) + 1
feature_blocks.append((f"res{block_index}", v) )
A__ : Optional[Any] =nn.ModuleDict(lowerCAmelCase_ )
def lowercase__ ( self : int , lowerCAmelCase_ : Tensor ) -> List[Any]:
'''simple docstring'''
return get_trunk_forward_outputs(
lowerCAmelCase_ , out_feat_keys=lowerCAmelCase_ , feature_blocks=self._feature_blocks , )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Any , lowerCAmelCase_ : str ) -> str:
'''simple docstring'''
A__ : Optional[Any] =x.split("""-""" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : str , lowerCAmelCase_ : str ) -> Callable[[], Tuple[nn.Module, Dict]]:
'''simple docstring'''
# default to timm!
if x not in self:
A__ : Any =self.convert_name_to_timm(lowerCAmelCase_ )
A__ : Dict =partial(lambda: (timm.create_model(lowerCAmelCase_ , pretrained=lowerCAmelCase_ ).eval(), None) )
else:
A__ : Optional[int] =super().__getitem__(lowerCAmelCase_ )
return val
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __getitem__( self : Any , lowerCAmelCase_ : str ) -> Callable[[], nn.Module]:
'''simple docstring'''
if "seer" in x and "in1k" not in x:
A__ : Union[str, Any] =RegNetModel
else:
A__ : Optional[int] =RegNetForImageClassification
return val
def __lowerCamelCase ( __snake_case : int, __snake_case : Union[str, Any], __snake_case : List[Tuple[str, str]] ) -> List[Any]:
"""simple docstring"""
for from_key, to_key in keys:
A__ : Tuple =from_state_dict[from_key].clone()
print(f"Copied key={from_key} to={to_key}" )
return to_state_dict
def __lowerCamelCase ( __snake_case : str, __snake_case : Callable[[], nn.Module], __snake_case : Callable[[], nn.Module], __snake_case : RegNetConfig, __snake_case : Path, __snake_case : bool = True, ) -> Union[str, Any]:
"""simple docstring"""
print(f"Converting {name}..." )
with torch.no_grad():
A__ , A__ : Tuple =from_model_func()
A__ : Dict =our_model_func(__snake_case ).eval()
A__ : Dict =ModuleTransfer(src=__snake_case, dest=__snake_case, raise_if_mismatch=__snake_case )
A__ : str =torch.randn((1, 3, 224, 224) )
module_transfer(__snake_case )
if from_state_dict is not None:
A__ : List[str] =[]
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
A__ : Any =[("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
A__ : Optional[Any] =manually_copy_vissl_head(__snake_case, our_model.state_dict(), __snake_case )
our_model.load_state_dict(__snake_case )
A__ : List[Any] =our_model(__snake_case, output_hidden_states=__snake_case )
A__ : Any =(
our_outputs.logits if isinstance(__snake_case, __snake_case ) else our_outputs.last_hidden_state
)
A__ : str =from_model(__snake_case )
A__ : Union[str, Any] =from_output[-1] if type(__snake_case ) is list else from_output
    # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
A__ : Union[str, Any] =our_outputs.hidden_states[-1]
assert torch.allclose(__snake_case, __snake_case ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name, commit_message="""Add model""", use_temp_dir=__snake_case, )
A__ : Any =224 if """seer""" not in name else 384
# we can use the convnext one
A__ : Optional[int] =AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""", size=__snake_case )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name, commit_message="""Add image processor""", use_temp_dir=__snake_case, )
print(f"Pushed {name}" )
def __lowerCamelCase ( __snake_case : Path, __snake_case : str = None, __snake_case : bool = True ) -> List[Any]:
"""simple docstring"""
A__ : Tuple ="""imagenet-1k-id2label.json"""
A__ : List[Any] =1_000
A__ : Optional[int] =(1, num_labels)
A__ : Dict ="""huggingface/label-files"""
A__ : str =num_labels
A__ : Union[str, Any] =json.load(open(cached_download(hf_hub_url(__snake_case, __snake_case, repo_type="""dataset""" ) ), """r""" ) )
A__ : Any ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : Optional[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
A__ : Tuple =partial(__snake_case, num_labels=__snake_case, idalabel=__snake_case, labelaid=__snake_case )
A__ : List[str] ={
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1_008], groups_width=48, layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1_360], groups_width=40, layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1_624], groups_width=56, layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1_920], groups_width=120, layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2_240], groups_width=112, layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2_048], groups_width=128, layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1_344, 2_520], groups_width=168, layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1_512], groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1_088], groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1_296], groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2_016], groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2_240], groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1_232, 3_024], groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1_392, 3_712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1_392, 3_712], groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1_968, 4_920], groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1_056, 2_904, 7_392], groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1_696, 2_544, 5_088], groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2_020, 4_040, 11_110, 28_280], groups_width=1_010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1_392, 3_712], groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1_968, 4_920], groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1_056, 2_904, 7_392], groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1_696, 2_544, 5_088], groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2_020, 4_040, 11_110, 28_280], groups_width=1_010 ),
}
A__ : Optional[int] =NameToOurModelFuncMap()
A__ : List[Any] =NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__snake_case : str, __snake_case : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
A__ : str =torch.hub.load_state_dict_from_url(__snake_case, model_dir=str(__snake_case ), map_location="""cpu""" )
A__ : Dict =model_func()
# check if we have a head, if yes add it
A__ : Union[str, Any] =files["""classy_state_dict"""]["""base_model"""]["""model"""]
A__ : Dict =model_state_dict["""trunk"""]
model.load_state_dict(__snake_case )
return model.eval(), model_state_dict["heads"]
# pretrained
A__ : Dict =partial(
__snake_case, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
A__ : int =partial(
__snake_case, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
A__ : List[Any] =partial(
__snake_case, """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
A__ : Tuple =partial(
__snake_case, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""", lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=6_20.83, w_m=2.52 ) ) ), )
# IN1K finetuned
A__ : int =partial(
__snake_case, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
A__ : str =partial(
__snake_case, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
A__ : int =partial(
__snake_case, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
A__ : int =partial(
__snake_case, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""", lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=6_20.83, w_m=2.52 ) ) ), )
if model_name:
convert_weight_and_push(
__snake_case, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], __snake_case, __snake_case, )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__snake_case, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], __snake_case, __snake_case, __snake_case, )
return config, expected_shape
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
        ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
__snake_case : List[Any] = parser.parse_args()
__snake_case : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
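# Hedged invocation sketch (the script file name is a placeholder; flags match
# the argparse definition above). Note that `--push_to_hub` is declared with
# `type=bool`, so any non-empty string -- including "False" -- parses as True;
# rely on the default or edit the source to disable pushing:
#
#     python convert_regnet_to_pytorch.py \
#         --model_name regnet-y-040 \
#         --pytorch_dump_folder_path ./regnet-dump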
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case : Union[str, Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
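# Hedged sketch of what `_LazyModule` buys: the torch-backed classes are only
# imported on first attribute access, so config-only use stays cheap:
#
#     from transformers.models.falcon import FalconConfig        # lightweight
#     from transformers.models.falcon import FalconForCausalLM   # pulls in torch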
| 687 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'timm_backbone'
def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
A__ : Tuple =backbone
A__ : Optional[Any] =num_channels
A__ : Union[str, Any] =features_only
A__ : str =use_pretrained_backbone
A__ : Tuple =True
A__ : Union[str, Any] =out_indices if out_indices is not None else (-1,)
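# Hedged instantiation sketch (upstream this class is `TimmBackboneConfig`;
# here it is bound to the obfuscated name `lowerCamelCase`, and the attribute
# names below come from the upstream signature):
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#     assert config.use_timm_backbone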
| 687 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__snake_case : int = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__snake_case : Optional[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__snake_case : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__snake_case : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
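# Hedged examples of paths this linter rejects (hypothetical file names):
#
#     "Maths/gcd.py"     -> contains an uppercase character
#     "maths/my gcd.py"  -> contains a space
#     "maths/my-gcd.py"  -> contains a hyphen
#     "gcd.py"           -> not inside a directory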
| 687 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Tuple = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Tuple ) -> Any:
"""simple docstring"""
A__ : int =DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
A__ : Optional[int] =1_024
A__ : Union[str, Any] =4_096
A__ : int =24
A__ : Dict =16
A__ : List[Any] =[5, 11, 17, 23]
A__ : List[str] =[256, 512, 1_024, 1_024]
A__ : Optional[Any] =(1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
A__ : List[str] =768
A__ : Optional[Any] =[1, 1, 1, 0.5]
A__ : Optional[int] =[256, 512, 768, 768]
A__ : Dict =150
A__ : Union[str, Any] =16
A__ : Any =(1, 384, 384)
A__ : Optional[int] =False
A__ : Optional[Any] ="""project"""
if "ade" in checkpoint_url:
A__ : Union[str, Any] =True
A__ : Optional[Any] =768
A__ : Any =[1, 1, 1, 0.5]
A__ : int =150
A__ : Any =16
A__ : Optional[Any] ="""huggingface/label-files"""
A__ : Optional[int] ="""ade20k-id2label.json"""
A__ : Any =json.load(open(cached_download(hf_hub_url(__snake_case, __snake_case, repo_type="""dataset""" ) ), """r""" ) )
A__ : int ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : Any =idalabel
A__ : Union[str, Any] ={v: k for k, v in idalabel.items()}
A__ : List[Any] =[1, 150, 480, 480]
return config, expected_shape
def __lowerCamelCase ( __snake_case : List[str] ) -> Any:
"""simple docstring"""
A__ : Any =["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Any ) -> List[str]:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A__ : Union[str, Any] =name.replace("""pretrained.model""", """dpt.encoder""" )
if "pretrained.model" in name:
A__ : Union[str, Any] =name.replace("""pretrained.model""", """dpt.embeddings""" )
if "patch_embed" in name:
A__ : Any =name.replace("""patch_embed""", """""" )
if "pos_embed" in name:
A__ : Union[str, Any] =name.replace("""pos_embed""", """position_embeddings""" )
if "attn.proj" in name:
A__ : Optional[Any] =name.replace("""attn.proj""", """attention.output.dense""" )
if "proj" in name and "project" not in name:
A__ : Union[str, Any] =name.replace("""proj""", """projection""" )
if "blocks" in name:
A__ : Optional[Any] =name.replace("""blocks""", """layer""" )
if "mlp.fc1" in name:
A__ : Tuple =name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
A__ : Optional[Any] =name.replace("""mlp.fc2""", """output.dense""" )
if "norm1" in name and "backbone" not in name:
A__ : Any =name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
A__ : Dict =name.replace("""norm2""", """layernorm_after""" )
if "scratch.output_conv" in name:
A__ : List[str] =name.replace("""scratch.output_conv""", """head""" )
if "scratch" in name:
A__ : Dict =name.replace("""scratch""", """neck""" )
if "layer1_rn" in name:
A__ : List[str] =name.replace("""layer1_rn""", """convs.0""" )
if "layer2_rn" in name:
A__ : Optional[Any] =name.replace("""layer2_rn""", """convs.1""" )
if "layer3_rn" in name:
A__ : Dict =name.replace("""layer3_rn""", """convs.2""" )
if "layer4_rn" in name:
A__ : int =name.replace("""layer4_rn""", """convs.3""" )
if "refinenet" in name:
A__ : Any =int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A__ : str =name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
A__ : Optional[int] =name.replace("""out_conv""", """projection""" )
if "resConfUnit1" in name:
A__ : Optional[Any] =name.replace("""resConfUnit1""", """residual_layer1""" )
if "resConfUnit2" in name:
A__ : str =name.replace("""resConfUnit2""", """residual_layer2""" )
if "conv1" in name:
A__ : int =name.replace("""conv1""", """convolution1""" )
if "conv2" in name:
A__ : Optional[Any] =name.replace("""conv2""", """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A__ : int =name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
A__ : Union[str, Any] =name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
A__ : str =name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
A__ : List[Any] =name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A__ : List[Any] =name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
A__ : str =name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
A__ : Optional[Any] =name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
A__ : Optional[Any] =name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
A__ : List[Any] =name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
A__ : Tuple =name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
A__ : str =name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
A__ : str =name.replace("""pretrained""", """dpt""" )
if "bn" in name:
A__ : List[Any] =name.replace("""bn""", """batch_norm""" )
if "head" in name:
A__ : Union[str, Any] =name.replace("""head""", """head.head""" )
if "encoder.norm" in name:
A__ : Any =name.replace("""encoder.norm""", """layernorm""" )
if "auxlayer" in name:
A__ : Any =name.replace("""auxlayer""", """auxiliary_head.head""" )
if "backbone" in name:
A__ : Union[str, Any] =name.replace("""backbone""", """backbone.bit.encoder""" )
if ".." in name:
A__ : str =name.replace("""..""", """.""" )
if "stem.conv" in name:
A__ : str =name.replace("""stem.conv""", """bit.embedder.convolution""" )
if "blocks" in name:
A__ : Optional[Any] =name.replace("""blocks""", """layers""" )
if "convolution" in name and "backbone" in name:
A__ : Union[str, Any] =name.replace("""convolution""", """conv""" )
if "layer" in name and "backbone" in name:
A__ : List[str] =name.replace("""layer""", """layers""" )
if "backbone.bit.encoder.bit" in name:
A__ : Optional[int] =name.replace("""backbone.bit.encoder.bit""", """backbone.bit""" )
if "embedder.conv" in name:
A__ : List[Any] =name.replace("""embedder.conv""", """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
A__ : str =name.replace("""backbone.bit.encoder.stem.norm""", """backbone.bit.embedder.norm""" )
return name
def __lowerCamelCase ( __snake_case : Any, __snake_case : List[str] ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : List[Any] =state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
A__ : Any =state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : str =in_proj_weight[: config.hidden_size, :]
A__ : List[str] =in_proj_bias[: config.hidden_size]
A__ : List[str] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Any =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : str =in_proj_weight[
-config.hidden_size :, :
]
A__ : List[str] =in_proj_bias[-config.hidden_size :]
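# A minimal illustrative sketch (not part of the original converter; the
# hidden size of 4 is an assumption) of the fused-qkv split performed above:
# timm stores query/key/value as one (3 * hidden_size, hidden_size) matrix,
# and the three row-blocks are the query, key and value projections in order.
def _demo_fused_qkv_split() -> None:
    import torch

    hidden_size = 4
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    # Stacking the blocks back together reproduces the fused matrix exactly.
    assert torch.equal(torch.cat([query, key, value], dim=0), in_proj_weight)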
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : Optional[int] ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Dict =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Optional[Any], __snake_case : str, __snake_case : str, __snake_case : Any ) -> List[str]:
"""simple docstring"""
A__ , A__ : Any =get_dpt_config(__snake_case )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
A__ : Optional[Any] =torch.load(__snake_case, map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__snake_case )
# rename keys
for key in state_dict.copy().keys():
A__ : Optional[Any] =state_dict.pop(__snake_case )
A__ : List[str] =val
# read in qkv matrices
read_in_q_k_v(__snake_case, __snake_case )
# load HuggingFace model
A__ : Optional[int] =DPTForSemanticSegmentation(__snake_case ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
# Check outputs on an image
A__ : Optional[Any] =480 if """ade""" in checkpoint_url else 384
A__ : Dict =DPTImageProcessor(size=__snake_case )
A__ : Union[str, Any] =prepare_img()
A__ : Union[str, Any] =image_processor(__snake_case, return_tensors="""pt""" )
# forward pass
A__ : Union[str, Any] =model(**__snake_case ).logits if """ade""" in checkpoint_url else model(**__snake_case ).predicted_depth
if show_prediction:
A__ : Tuple =(
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode="""bicubic""", align_corners=__snake_case, )
.squeeze()
.cpu()
.numpy()
)
        Image.fromarray(((prediction / prediction.max()) * 255).astype("""uint8""") ).show()
if pytorch_dump_folder_path is not None:
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
__snake_case : List[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 687 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str:
"""simple docstring"""
A__ : int =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any =""""""
else:
A__ : Optional[int] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Optional[int] =in_proj_weight[
: config.hidden_size, :
]
A__ : str =in_proj_bias[: config.hidden_size]
A__ : Optional[Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] =in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] =in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =dct.pop(__snake_case )
A__ : Tuple =val
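# Illustrative sketch (the toy state dict is an assumption): `rename_key`
# above simply pops the entry under its old name and re-inserts the same
# value under the new name, leaving every other key untouched.
def _demo_rename_key() -> None:
    state = {"norm.weight": 1, "head.weight": 2}
    value = state.pop("norm.weight")
    state["vit.layernorm.weight"] = value
    assert state == {"head.weight": 2, "vit.layernorm.weight": 1}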
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str:
"""simple docstring"""
A__ : Tuple =ViTConfig()
# patch_size
if model_name[-1] == "8":
A__ : Optional[Any] =8
# set labels if required
if not base_model:
A__ : Optional[Any] =1_000
A__ : str ="""huggingface/label-files"""
A__ : Any ="""imagenet-1k-id2label.json"""
A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : List[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A__ : str =384
A__ : Optional[Any] =1_536
A__ : Optional[Any] =12
A__ : Union[str, Any] =6
# load original model from torch hub
A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : List[str] =original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
if base_model:
A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval()
else:
A__ : List[str] =ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
A__ : Union[str, Any] =ViTImageProcessor()
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Union[str, Any] =encoding["""pixel_values"""]
A__ : Union[str, Any] =model(__snake_case )
if base_model:
A__ : List[str] =original_model(__snake_case )
assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
A__ : Optional[int] =original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__snake_case : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 687 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__snake_case : str = ''
__snake_case : List[Any] = ''
__snake_case : Optional[int] = ''
__snake_case : Union[str, Any] = 1 # (0 is vertical, 1 is horizontal)
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
A__ , A__ : List[Any] =get_dataset(__snake_case, __snake_case )
print("""Processing...""" )
A__ , A__ , A__ : List[Any] =update_image_and_anno(__snake_case, __snake_case, __snake_case )
for index, image in enumerate(__snake_case ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
A__ : Optional[int] =random_chars(32 )
A__ : str =paths[index].split(os.sep )[-1].rsplit(""".""", 1 )[0]
A__ : Tuple =f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(f"/{file_root}.jpg", __snake_case, [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"Success {index+1}/{len(__snake_case )} with {file_name}" )
A__ : List[Any] =[]
for anno in new_annos[index]:
A__ : List[Any] =f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(__snake_case )
with open(f"/{file_root}.txt", """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __lowerCamelCase ( __snake_case : str, __snake_case : str ) -> tuple[list, list]:
"""simple docstring"""
A__ : Optional[Any] =[]
A__ : Optional[Any] =[]
for label_file in glob.glob(os.path.join(__snake_case, """*.txt""" ) ):
A__ : int =label_file.split(os.sep )[-1].rsplit(""".""", 1 )[0]
with open(__snake_case ) as in_file:
A__ : List[Any] =in_file.readlines()
A__ : Optional[int] =os.path.join(__snake_case, f"{label_name}.jpg" )
A__ : List[str] =[]
for obj_list in obj_lists:
A__ : int =obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__snake_case )
labels.append(__snake_case )
return img_paths, labels
def __lowerCamelCase ( __snake_case : list, __snake_case : list, __snake_case : int = 1 ) -> tuple[list, list, list]:
"""simple docstring"""
A__ : List[str] =[]
A__ : Dict =[]
A__ : List[Any] =[]
for idx in range(len(__snake_case ) ):
A__ : Dict =[]
A__ : str =img_list[idx]
path_list.append(__snake_case )
A__ : Dict =anno_list[idx]
A__ : Any =cva.imread(__snake_case )
if flip_type == 1:
A__ : str =cva.flip(__snake_case, __snake_case )
for bbox in img_annos:
A__ : Union[str, Any] =1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
A__ : List[str] =cva.flip(__snake_case, __snake_case )
for bbox in img_annos:
A__ : Tuple =1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__snake_case )
new_imgs_list.append(__snake_case )
return new_imgs_list, new_annos_lists, path_list
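# Illustrative sketch (the sample box is an assumption): YOLO annotations use
# normalized centers, so a horizontal flip only replaces x_center with
# 1 - x_center and a vertical flip replaces y_center with 1 - y_center, as in
# the loop above.
def _demo_bbox_flip() -> None:
    label, x_center, y_center, width, height = 0, 0.25, 0.375, 0.125, 0.25
    flipped_horizontal = [label, 1 - x_center, y_center, width, height]
    flipped_vertical = [label, x_center, 1 - y_center, width, height]
    assert flipped_horizontal == [0, 0.75, 0.375, 0.125, 0.25]
    assert flipped_vertical == [0, 0.25, 0.625, 0.125, 0.25]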
def __lowerCamelCase ( __snake_case : int = 32 ) -> str:
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
A__ : List[str] =ascii_lowercase + digits
return "".join(random.choice(__snake_case ) for _ in range(__snake_case ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 687 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(__snake_case : int, __snake_case : Dict ):
def rule_func(__snake_case : int ) -> float:
A__ : Any =sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
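# Hedged sketch of the rule-string format consumed above (the example string
# "10:1.0,20:0.1,0.01" is an assumption): multiplier 1.0 until step 10, then
# 0.1 until step 20, then 0.01 for the rest of training.
def _demo_piecewise_rules() -> None:
    rules_dict = {10: 1.0, 20: 0.1}
    last_lr_multiple = 0.01

    def multiplier(step: int) -> float:
        for boundary in sorted(rules_dict):
            if step < boundary:
                return rules_dict[boundary]
        return last_lr_multiple

    assert multiplier(5) == 1.0
    assert multiplier(15) == 0.1
    assert multiplier(25) == 0.01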
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
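# Hedged numeric sketch (10 warmup steps, 110 training steps and the default
# num_cycles = 0.5 are assumptions): the multiplier rises linearly during
# warmup, peaks at 1.0 right after warmup, and decays along a half cosine to
# 0 at the final step.
def _demo_cosine_multiplier() -> None:
    num_warmup_steps, num_training_steps, num_cycles = 10, 110, 0.5

    def multiplier(step: int) -> float:
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        progress = (step - num_warmup_steps) / max(1, num_training_steps - num_warmup_steps)
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * num_cycles * 2.0 * progress)))

    assert multiplier(5) == 0.5
    assert multiplier(10) == 1.0
    assert multiplier(110) < 1e-12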
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
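# Hedged endpoint check (lr_init = 1e-3, lr_end = 1e-7, power = 1.0 and the
# 10/110 step counts are assumptions): right after warmup the decayed value
# equals lr_init, and at num_training_steps it has reached lr_end.
def _demo_polynomial_endpoints() -> None:
    lr_init, lr_end, power = 1e-3, 1e-7, 1.0
    num_warmup_steps, num_training_steps = 10, 110

    def decayed_lr(step: int) -> float:
        lr_range = lr_init - lr_end
        decay_steps = num_training_steps - num_warmup_steps
        pct_remaining = 1 - (step - num_warmup_steps) / decay_steps
        return lr_range * pct_remaining**power + lr_end

    assert abs(decayed_lr(num_warmup_steps) - lr_init) < 1e-12
    assert abs(decayed_lr(num_training_steps) - lr_end) < 1e-12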
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =SchedulerType(__snake_case )
A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__snake_case, last_epoch=__snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, )
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
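# Hedged usage sketch (illustrative only; in the upstream diffusers module
# this factory is exposed as `get_scheduler`, and the toy model below is an
# assumption):
#
#     model = torch.nn.Linear(4, 2)
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#     scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=110)
#     for step in range(110):
#         optimizer.step()
#         scheduler.step()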
| 687 | 1 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__snake_case : Union[str, Any] = numpy.array([0, 0])
__snake_case : int = numpy.array([0.5, 0.8660254])
__snake_case : List[Any] = numpy.array([1, 0])
__snake_case : Any = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def __lowerCamelCase ( __snake_case : list[numpy.ndarray], __snake_case : int ) -> list[numpy.ndarray]:
"""simple docstring"""
A__ : str =initial_vectors
for _ in range(__snake_case ):
A__ : List[str] =iteration_step(__snake_case )
return vectors
def __lowerCamelCase ( __snake_case : list[numpy.ndarray] ) -> list[numpy.ndarray]:
"""simple docstring"""
A__ : Tuple =[]
for i, start_vector in enumerate(vectors[:-1] ):
A__ : int =vectors[i + 1]
new_vectors.append(__snake_case )
A__ : Tuple =end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def __lowerCamelCase ( __snake_case : numpy.ndarray, __snake_case : float ) -> numpy.ndarray:
"""simple docstring"""
A__ : int =numpy.radians(__snake_case )
A__ , A__ : List[str] =numpy.cos(__snake_case ), numpy.sin(__snake_case )
A__ : Union[str, Any] =numpy.array(((c, -s), (s, c)) )
return numpy.dot(__snake_case, __snake_case )
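# Illustrative check (not part of the original script): the 2x2 rotation
# matrix ((c, -s), (s, c)) built above maps the unit vector (1, 0) to (0, 1)
# for a 90-degree angle.
def _demo_rotate_90() -> None:
    angle = numpy.radians(90)
    c, s = numpy.cos(angle), numpy.sin(angle)
    rotation = numpy.array(((c, -s), (s, c)))
    assert numpy.allclose(numpy.dot(rotation, numpy.array([1, 0])), [0, 1])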
def __lowerCamelCase ( __snake_case : list[numpy.ndarray] ) -> None:
"""simple docstring"""
A__ : Union[str, Any] =plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
A__ , A__ : int =zip(*__snake_case )
plt.plot(__snake_case, __snake_case )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Optional[int] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
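# Hedged usage note (illustrative; not part of this __init__): in the
# upstream transformers package the _LazyModule instance is installed via
# sys.modules[__name__], so the heavy torch-backed classes are only imported
# on first attribute access, e.g.
#
#     from transformers.models.squeezebert import SqueezeBertConfig
#
# while static type checkers resolve names through the eager TYPE_CHECKING
# branch above.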
| 687 | 1 |